Garbage collect event memory #4832
Changes from all commits
@@ -57,6 +57,42 @@ struct EventInstance<E: Event> {
    pub event: E,
}

/// Settings for controlling the general behavior of [`Events<T>`].
pub struct EventSettings<T> {
    /// Controls how and when the memory allocated for events will be freed.
    pub garbage_collection: EventGarbageCollection,
    marker_: PhantomData<fn() -> T>,
}

impl<T> Default for EventSettings<T> {
    fn default() -> Self {
        Self {
            garbage_collection: Default::default(),
            marker_: PhantomData,
        }
    }
}

/// Settings for controlling the garbage collection behavior of [`Events<T>`].
pub enum EventGarbageCollection {

Review comment: I really like this flavor of design; there are a number of other settings that I'd like to be able to configure for events (e.g. smallvec usage and cleanup strategy). As a result, I think we should refactor add_event to use a generic plugin under the hood. (A rough sketch of that plugin idea follows below, after this hunk.)

    /// Exponentially decays the amount of allocated memory for the backing event buffer
    /// with a configured minimum. This is the default.
    ExponentialFalloff {
        /// Each internal vec never goes below this value.
        /// There are two internal vecs, so the total minimum capacity is 2 * min_capacity.
        /// This defaults to 0.
        min_capacity: usize,
    },
    /// Disables garbage collection entirely.
    None,
}

impl Default for EventGarbageCollection {
    fn default() -> Self {
        Self::ExponentialFalloff { min_capacity: 0 }
    }
}

/// An event collection that represents the events that occurred within the last two
/// [`Events::update`] calls.
/// Events can be written to using an [`EventWriter`]

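The generic-plugin refactor suggested in the review comment above is not part of this diff. Below is a minimal sketch of what it could look like, assuming the bevy_app API of the time (`Plugin`, `App`, `CoreStage`) and the `EventSettings`/`EventGarbageCollection` types added in this PR; `EventsPlugin`, `MyEvent`, and `configure` are invented names.

```rust
use std::marker::PhantomData;

use bevy_app::{App, CoreStage, Plugin};
use bevy_ecs::event::{Event, EventGarbageCollection, EventSettings, Events};

/// Hypothetical plugin that registers `Events<E>` and its per-frame update
/// system, so per-event-type settings live next to the registration.
struct EventsPlugin<E: Event>(PhantomData<fn() -> E>);

impl<E: Event> Default for EventsPlugin<E> {
    fn default() -> Self {
        Self(PhantomData)
    }
}

impl<E: Event> Plugin for EventsPlugin<E> {
    fn build(&self, app: &mut App) {
        app.init_resource::<Events<E>>()
            .add_system_to_stage(CoreStage::First, Events::<E>::update_system);
    }
}

struct MyEvent;

fn configure(app: &mut App) {
    // Opt this event type out of garbage collection by inserting its
    // settings resource; `update_system` picks it up as an optional resource.
    let mut settings = EventSettings::<MyEvent>::default();
    settings.garbage_collection = EventGarbageCollection::None;
    app.insert_resource(settings)
        .add_plugin(EventsPlugin::<MyEvent>::default());
}
```

An `add_event`-style method could then reduce to a thin wrapper over `add_plugin(EventsPlugin::<E>::default())`.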
@@ -82,7 +118,7 @@ struct EventInstance<E: Event> {
///
/// # Example
/// ```
-/// use bevy_ecs::event::Events;
+/// use bevy_ecs::event::*;
///
/// struct MyEvent {
///     value: usize

@@ -91,9 +127,10 @@ struct EventInstance<E: Event> {
/// // setup
/// let mut events = Events::<MyEvent>::default();
/// let mut reader = events.get_reader();
+/// let settings = EventSettings::<MyEvent>::default();
///
/// // run this once per update/frame
-/// events.update();
+/// events.update(&settings);
///
/// // somewhere else: send an event
/// events.send(MyEvent { value: 1 });

@@ -437,9 +474,19 @@ impl<E: Event> Events<E> {

    /// Swaps the event buffers and clears the oldest event buffer. In general, this should be
    /// called once per frame/update.
-    pub fn update(&mut self) {
+    pub fn update(&mut self, settings: &EventSettings<E>) {
        std::mem::swap(&mut self.events_a, &mut self.events_b);
        // Garbage collect unused event space. Shrink after clear to avoid a copy.
        let new_capacity = match settings.garbage_collection {
            EventGarbageCollection::ExponentialFalloff { min_capacity } => self
                .events_b
                .len()

Review comment: I don't know how … More generally, the exponential decay here is very aggressive. Given the cost of allocations compared to the relative benefit of recovering memory, it's probably worth having a smoother heuristic which observes the trend over several frames before kicking in some GC operation. Or at least make that an option, make that option the default one, and leave that aggressive instant-trigger decay variant for those few … (A rough sketch of such a trend-based heuristic follows this hunk.)

                .max(self.events_b.capacity() / 2)
                .max(min_capacity),
            EventGarbageCollection::None => self.events_b.capacity(),
        };
        self.events_b.clear();
        self.events_b.shrink_to(new_capacity);
        self.events_b.start_event_count = self.event_count;
        debug_assert_eq!(
            self.events_a.start_event_count + self.events_a.len(),

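The smoother, trend-based strategy the reviewer asks for is not implemented in this PR. Here is a rough standalone sketch of one possible heuristic; `TrendGc`, the window length, and the 4x threshold are all invented for illustration.

```rust
use std::collections::VecDeque;

/// Hypothetical GC heuristic: remember the peak event count over the last
/// `window` frames and only shrink once capacity has stayed well above that
/// peak for the whole window.
struct TrendGc {
    recent_peaks: VecDeque<usize>,
    window: usize,
}

impl TrendGc {
    fn new(window: usize) -> Self {
        assert!(window > 0);
        Self {
            recent_peaks: VecDeque::with_capacity(window),
            window,
        }
    }

    /// Called once per `update`; returns the capacity to shrink to, or `None`
    /// to leave the buffer untouched this frame.
    fn target_capacity(&mut self, len: usize, capacity: usize) -> Option<usize> {
        if self.recent_peaks.len() == self.window {
            self.recent_peaks.pop_front();
        }
        self.recent_peaks.push_back(len);
        if self.recent_peaks.len() < self.window {
            return None; // Not enough history to judge the trend yet.
        }
        let peak = *self.recent_peaks.iter().max().unwrap();
        // Shrink only if the buffer has been at least 4x larger than anything
        // actually used during the window, and then only halve it.
        if capacity >= peak.saturating_mul(4) && capacity > 0 {
            Some((capacity / 2).max(peak))
        } else {
            None
        }
    }
}
```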
@@ -448,8 +495,12 @@ impl<E: Event> Events<E> {
    }

    /// A system that calls [`Events::update`] once per frame.
-    pub fn update_system(mut events: ResMut<Self>) {
-        events.update();
+    pub fn update_system(mut events: ResMut<Self>, settings: Option<Res<EventSettings<E>>>) {

Review comment: This is a nice use of an optional resource; I like the way it avoids polluting the resource list with default values.

+        if let Some(settings) = settings {
+            events.update(&settings);
+        } else {
+            events.update(&Default::default());

Review comment (truncated): Does …

+        }
    }

    #[inline]

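A minimal sketch of how the optional settings resource would be supplied to `update_system`, assuming the `bevy_ecs` schedule API of the time (`SystemStage`); `MyEvent` is a placeholder event type.

```rust
use bevy_ecs::event::{EventGarbageCollection, EventSettings, Events};
use bevy_ecs::schedule::{Stage, SystemStage};
use bevy_ecs::world::World;

struct MyEvent;

fn main() {
    let mut world = World::new();
    world.insert_resource(Events::<MyEvent>::default());

    // Optional: without this resource, `update_system` falls back to
    // `EventSettings::default()` (exponential falloff with min_capacity 0).
    let mut settings = EventSettings::<MyEvent>::default();
    settings.garbage_collection =
        EventGarbageCollection::ExponentialFalloff { min_capacity: 128 };
    world.insert_resource(settings);

    // Run once per frame, e.g. from a stage scheduled early in the frame.
    let mut stage = SystemStage::parallel();
    stage.add_system(Events::<MyEvent>::update_system);
    stage.run(&mut world);
}
```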
@@ -598,7 +649,7 @@ mod tests {
            "reader_a receives next unread event"
        );

-        events.update();
+        events.update(&Default::default());

        let mut reader_d = events.get_reader();

@@ -620,7 +671,7 @@ mod tests {
            "reader_d receives all events created before and after update"
        );

-        events.update();
+        events.update(&Default::default());

        assert_eq!(
            get_events(&events, &mut reader_missed),

@@ -654,7 +705,7 @@ mod tests {
        assert!(reader.iter(&events).next().is_none());

        events.send(E(2));
-        events.update();
+        events.update(&Default::default());
        events.send(E(3));

        assert!(reader.iter(&events).eq([E(2), E(3)].iter()));

@@ -691,12 +742,12 @@ mod tests {
        events.send(TestEvent { i: 0 });
        assert!(!events.is_empty());

-        events.update();
+        events.update(&Default::default());
        assert!(!events.is_empty());

        // events are only empty after the second call to update
        // due to double buffering.
-        events.update();
+        events.update(&Default::default());
        assert!(events.is_empty());
    }

@@ -750,12 +801,12 @@ mod tests {
        events.send(TestEvent { i: 0 });
        let reader = events.get_reader();
        assert_eq!(reader.len(&events), 2);
-        events.update();
+        events.update(&Default::default());
        events.send(TestEvent { i: 0 });
        assert_eq!(reader.len(&events), 3);
-        events.update();
+        events.update(&Default::default());
        assert_eq!(reader.len(&events), 1);
-        events.update();
+        events.update(&Default::default());
        assert!(reader.is_empty(&events));
    }
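Not part of the test changes in this diff: a self-contained sketch of the capacity math that `ExponentialFalloff` applies in `update`, with `falloff_target` as an invented helper mirroring the `len().max(capacity / 2).max(min_capacity)` expression above.

```rust
/// Mirrors the shrink target computed in `Events::update` for
/// `ExponentialFalloff`, extracted as a free function for illustration.
fn falloff_target(len: usize, capacity: usize, min_capacity: usize) -> usize {
    len.max(capacity / 2).max(min_capacity)
}

#[test]
fn falloff_halves_toward_min_capacity() {
    // After a burst grew the buffer to 1024, quiet frames decay capacity
    // geometrically instead of dropping it to zero at once, and it never
    // goes below the configured minimum of 64.
    let mut capacity = 1024;
    for &expected in &[512, 256, 128, 64, 64] {
        capacity = falloff_target(0, capacity, 64);
        assert_eq!(capacity, expected);
    }
}
```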