diff --git a/Cargo.toml b/Cargo.toml index b5d5dfc621..0ac40920c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ mimalloc-sys = { version = "0.1.6", optional = true } mmtk-macros = { version = "0.20.0", path = "macros/" } num_cpus = "1.8" num-traits = "0.2" +peace-lock = "0.1.3" pfm = { version = "0.1.0-beta.3", optional = true } probe = "0.5" portable-atomic = "1.4.3" @@ -72,6 +73,9 @@ perf_counter = ["pfm"] # Do not modify the following line - ci-common.sh matches it # -- Non mutually exclusive features -- +# Enable checks for the type ArcFlexMut (ArcFlexMut is implemented with peace-lock, so this simply enables peace-lock's check feature). +check_flex_mut = ["peace-lock/check"] + # spaces with different semantics # A VM-allocated/managed space. A binding could use this for their boot image, metadata space, etc. @@ -129,7 +133,7 @@ nogc_no_zeroing = ["nogc_lock_free"] single_worker = [] # To run expensive comprehensive runtime checks, such as checking duplicate edges -extreme_assertions = [] +extreme_assertions = ["check_flex_mut"] # Enable multiple spaces for NoGC, each allocator maps to an individual ImmortalSpace. nogc_multi_space = [] diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs b/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs index 29be4f6184..556f65c1de 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs @@ -61,15 +61,15 @@ impl ProcessEdgesWork for MyGCProcessEdges { } let worker = self.worker(); let queue = &mut self.base.nodes; - if self.plan.tospace().in_space(object) { - self.plan.tospace().trace_object( + if self.plan.tospace().read().in_space(object) { + self.plan.tospace().read().trace_object( queue, object, Some(CopySemantics::DefaultCopy), worker, ) - } else if self.plan.fromspace().in_space(object) { - self.plan.fromspace().trace_object( + } else if self.plan.fromspace().read().in_space(object) { + self.plan.fromspace().read().trace_object( queue, object, Some(CopySemantics::DefaultCopy), diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index a7e6fe7485..c5af0a8109 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -15,6 +15,7 @@ use crate::util::copy::*; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::VMBinding; use enum_map::EnumMap; use std::sync::atomic::{AtomicBool, Ordering}; // Add @@ -32,10 +33,10 @@ pub struct MyGC { pub hi: AtomicBool, #[space] #[copy_semantics(CopySemantics::DefaultCopy)] - pub copyspace0: CopySpace, + pub copyspace0: ArcFlexMut>, #[space] #[copy_semantics(CopySemantics::DefaultCopy)] - pub copyspace1: CopySpace, + pub copyspace1: ArcFlexMut>, #[parent] pub common: CommonPlan, } @@ -66,7 +67,7 @@ impl Plan for MyGC { }, space_mapping: vec![ // The tospace argument doesn't matter, we will rebind before a GC anyway.
- (CopySelector::CopySpace(0), &self.copyspace0) + (CopySelector::CopySpace(0), self.copyspace0.clone().into_dyn_space()) ], constraints: &MYGC_CONSTRAINTS, } @@ -92,48 +93,48 @@ impl Plan for MyGC { // Modify // ANCHOR: prepare - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { self.common.prepare(tls, true); self.hi .store(!self.hi.load(Ordering::SeqCst), Ordering::SeqCst); // Flips 'hi' to flip space definitions let hi = self.hi.load(Ordering::SeqCst); - self.copyspace0.prepare(hi); - self.copyspace1.prepare(!hi); + self.copyspace0.read().prepare(hi); + self.copyspace1.read().prepare(!hi); - self.fromspace_mut() + self.fromspace().write() .set_copy_for_sft_trace(Some(CopySemantics::DefaultCopy)); - self.tospace_mut().set_copy_for_sft_trace(None); + self.tospace().write().set_copy_for_sft_trace(None); } // ANCHOR_END: prepare // Add // ANCHOR: prepare_worker fn prepare_worker(&self, worker: &mut GCWorker) { - unsafe { worker.get_copy_context_mut().copy[0].assume_init_mut() }.rebind(self.tospace()); + unsafe { worker.get_copy_context_mut().copy[0].assume_init_mut() }.rebind(self.tospace().clone()); } // ANCHOR_END: prepare_worker // Modify // ANCHOR: release - fn release(&mut self, tls: VMWorkerThread) { + fn release(&self, tls: VMWorkerThread) { self.common.release(tls, true); - self.fromspace().release(); + self.fromspace().read().release(); } // ANCHOR_END: release // Modify // ANCHOR: plan_get_collection_reserve fn get_collection_reserved_pages(&self) -> usize { - self.tospace().reserved_pages() + self.tospace().read().reserved_pages() } // ANCHOR_END: plan_get_collection_reserve // Modify // ANCHOR: plan_get_used_pages fn get_used_pages(&self) -> usize { - self.tospace().reserved_pages() + self.common.get_used_pages() + self.tospace().read().reserved_pages() + self.common.get_used_pages() } // ANCHOR_END: plan_get_used_pages @@ -170,9 +171,9 @@ impl MyGC { let res = MyGC { hi: AtomicBool::new(false), // ANCHOR: copyspace_new - copyspace0: CopySpace::new(plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false), + copyspace0: ArcFlexMut::new(CopySpace::new(plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false)), // ANCHOR_END: copyspace_new - copyspace1: CopySpace::new(plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true), + copyspace1: ArcFlexMut::new(CopySpace::new(plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true)), common: CommonPlan::new(plan_args), }; @@ -183,7 +184,7 @@ impl MyGC { // ANCHOR_END: plan_new // ANCHOR: plan_space_access - pub fn tospace(&self) -> &CopySpace { + pub fn tospace(&self) -> &ArcFlexMut> { if self.hi.load(Ordering::SeqCst) { &self.copyspace1 } else { @@ -191,28 +192,12 @@ impl MyGC { } } - pub fn fromspace(&self) -> &CopySpace { + pub fn fromspace(&self) -> &ArcFlexMut> { if self.hi.load(Ordering::SeqCst) { &self.copyspace0 } else { &self.copyspace1 } } - - pub fn tospace_mut(&mut self) -> &mut CopySpace { - if self.hi.load(Ordering::SeqCst) { - &mut self.copyspace1 - } else { - &mut self.copyspace0 - } - } - - pub fn fromspace_mut(&mut self) -> &mut CopySpace { - if self.hi.load(Ordering::SeqCst) { - &mut self.copyspace0 - } else { - &mut self.copyspace1 - } - } // ANCHOR_END: plan_space_access } diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs b/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs index 9cd110392f..aeddb9ca85 100644 --- 
a/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs @@ -44,7 +44,7 @@ pub fn mygc_mutator_release( .plan .downcast_ref::>() .unwrap() - .tospace(), + .tospace().clone().into_dyn_space(), ); } // ANCHOR_END: release @@ -78,7 +78,7 @@ pub fn create_mygc_mutator( // ANCHOR: space_mapping space_mapping: Box::new({ let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, mygc); - vec.push((AllocatorSelector::BumpPointer(0), mygc.tospace())); + vec.push((AllocatorSelector::BumpPointer(0), mygc.tospace().clone().into_dyn_space())); vec }), // ANCHOR_END: space_mapping diff --git a/docs/userguide/src/tutorial/mygc/ss/alloc.md b/docs/userguide/src/tutorial/mygc/ss/alloc.md index 1da743a41a..46c86ad37f 100644 --- a/docs/userguide/src/tutorial/mygc/ss/alloc.md +++ b/docs/userguide/src/tutorial/mygc/ss/alloc.md @@ -56,8 +56,11 @@ Change `pub struct MyGC` to add new instance variables. 1. Delete the existing fields in the constructor. 2. Add `pub hi: AtomicBool,`. This is a thread-safe bool, indicating which copyspace is the tospace. - 3. Add `pub copyspace0: CopySpace,` - and `pub copyspace1: CopySpace,`. These are the two copyspaces. + 3. Add `pub copyspace0: ArcFlexMut>,` + and `pub copyspace1: ArcFlexMut>,`. These are the two copyspaces. + We use the type `ArcFlexMut` from `mmtk::util::rust_util::flex_mut`, which + lets us share a reference to each space with other components (such as mutators), + while still being able to acquire a mutable reference to a space when we need one. 4. Add `pub common: CommonPlan,`. This holds an instance of the common plan. diff --git a/macros/src/has_spaces_impl.rs b/macros/src/has_spaces_impl.rs index 71286e415d..53833686bb 100644 --- a/macros/src/has_spaces_impl.rs +++ b/macros/src/has_spaces_impl.rs @@ -45,11 +45,13 @@ pub(crate) fn generate_impl_items<'a>( let f_ident = f.ident.as_ref().unwrap(); let visitor = quote! { - __func(&self.#f_ident); + let space = self.#f_ident.read(); + __func(&*space); }; let visitor_mut = quote! { - __func(&mut self.#f_ident); + let mut space = self.#f_ident.write(); + __func(&mut *space); }; space_visitors.push(visitor); @@ -75,7 +77,7 @@ pub(crate) fn generate_impl_items<'a>( #parent_visitor } - fn for_each_space_mut(&mut self, __func: &mut dyn FnMut(&mut dyn Space)) { + fn for_each_space_mut(&self, __func: &mut dyn FnMut(&mut dyn Space)) { #(#space_visitors_mut)* #parent_visitor_mut } diff --git a/macros/src/plan_trace_object_impl.rs b/macros/src/plan_trace_object_impl.rs index 7be2e44c9a..44fcd3f52d 100644 --- a/macros/src/plan_trace_object_impl.rs +++ b/macros/src/plan_trace_object_impl.rs @@ -45,7 +45,6 @@ pub(crate) fn generate_trace_object<'a>( // Generate a check with early return for each space let space_field_handler = space_fields.iter().map(|f| { let f_ident = f.ident.as_ref().unwrap(); - let f_ty = &f.ty; // Figure out copy let maybe_copy_semantics_attr = util::get_field_attribute(f, "copy_semantics"); @@ -71,8 +70,11 @@ pub(crate) fn generate_trace_object<'a>( }; quote!
{ - if self.#f_ident.in_space(__mmtk_objref) { - return <#f_ty as PolicyTraceObject #ty_generics>::trace_object::(&self.#f_ident, __mmtk_queue, __mmtk_objref, #copy, __mmtk_worker); + { + let space = self.#f_ident.read(); + if space.in_space(__mmtk_objref) { + return PolicyTraceObject::trace_object::(&*space, __mmtk_queue, __mmtk_objref, #copy, __mmtk_worker); + } } } }); @@ -108,13 +110,15 @@ pub(crate) fn generate_post_scan_object<'a>( ) -> TokenStream2 { let scan_field_handler = post_scan_object_fields.iter().map(|f| { let f_ident = f.ident.as_ref().unwrap(); - let f_ty = &f.ty; quote! { - if self.#f_ident.in_space(__mmtk_objref) { - use crate::policy::gc_work::PolicyTraceObject; - <#f_ty as PolicyTraceObject #ty_generics>::post_scan_object(&self.#f_ident, __mmtk_objref); - return; + { + let space = self.#f_ident.read(); + if space.in_space(__mmtk_objref) { + use crate::policy::gc_work::PolicyTraceObject; + PolicyTraceObject::post_scan_object(&*space, __mmtk_objref); + return; + } } } }); @@ -148,10 +152,23 @@ pub(crate) fn generate_may_move_objects<'a>( ) -> TokenStream2 { // If any space or the parent may move objects, the plan may move objects let space_handlers = space_fields.iter().map(|f| { - let f_ty = &f.ty; - - quote! { - || <#f_ty as PolicyTraceObject #ty_generics>::may_move_objects::() + use syn::{Type, PathArguments}; + // We assume the space field is `ArcFlexMut` + if let Type::Path(type_path) = &f.ty { + if type_path.path.segments[0].ident == "ArcFlexMut" { + if let PathArguments::AngleBracketed(angle_bracketed_args) = &type_path.path.segments[0].arguments { + let inner_type = &angle_bracketed_args.args.first().unwrap(); + quote! { + || <#inner_type as PolicyTraceObject #ty_generics>::may_move_objects::() + } + } else { + unreachable!("Failed to get the inner type of ArcFlexMut: {:?}", f.ty) + } + } else { + panic!("Expected a space to be ArcFlexMut, found {:?}", f.ty) + } + } else { + panic!("Failed to get the type of a space: {:?}", f.ty) } }); diff --git a/src/memory_manager.rs b/src/memory_manager.rs index f9f0fb25ce..c9edcd5143 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -92,9 +92,10 @@ pub fn mmtk_init(builder: &MMTKBuilder) -> Box> { /// Currently we do not allow removing regions from VM space. 
#[cfg(feature = "vm_space")] pub fn set_vm_space(mmtk: &'static mut MMTK, start: Address, size: usize) { - unsafe { mmtk.get_plan_mut() } - .base_mut() + mmtk.get_plan() + .base() .vm_space + .write() .set_vm_region(start, size); } diff --git a/src/mmtk.rs b/src/mmtk.rs index 12f3e77cba..3cb28e96c7 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -23,7 +23,6 @@ use crate::util::sanity::sanity_checker::SanityChecker; use crate::util::statistics::stats::Stats; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; -use std::cell::UnsafeCell; use std::default::Default; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -107,7 +106,7 @@ impl Default for MMTKBuilder { pub struct MMTK { pub(crate) options: Arc, pub(crate) state: Arc, - pub(crate) plan: UnsafeCell>>, + pub(crate) plan: Box>, pub(crate) reference_processors: ReferenceProcessors, pub(crate) finalizable_processor: Mutex>::FinalizableType>>, @@ -198,7 +197,7 @@ impl MMTK { MMTK { options, state, - plan: UnsafeCell::new(plan), + plan, reference_processors: ReferenceProcessors::new(), finalizable_processor: Mutex::new(FinalizableProcessor::< >::FinalizableType, @@ -339,17 +338,7 @@ impl MMTK { } pub fn get_plan(&self) -> &dyn Plan { - unsafe { &**(self.plan.get()) } - } - - /// Get the plan as mutable reference. - /// - /// # Safety - /// - /// This is unsafe because the caller must ensure that the plan is not used by other threads. - #[allow(clippy::mut_from_ref)] - pub unsafe fn get_plan_mut(&self) -> &mut dyn Plan { - &mut **(self.plan.get()) + &*self.plan } pub fn get_options(&self) -> &Options { diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index c0cb53bfde..bba391131d 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -17,6 +17,7 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::VMRequest; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -34,10 +35,10 @@ pub struct GenCopy { pub hi: AtomicBool, #[space] #[copy_semantics(CopySemantics::Mature)] - pub copyspace0: CopySpace, + pub copyspace0: ArcFlexMut>, #[space] #[copy_semantics(CopySemantics::Mature)] - pub copyspace1: CopySpace, + pub copyspace1: ArcFlexMut>, } pub const GENCOPY_CONSTRAINTS: PlanConstraints = crate::plan::generational::GEN_CONSTRAINTS; @@ -57,7 +58,10 @@ impl Plan for GenCopy { }, space_mapping: vec![ // The tospace argument doesn't matter, we will rebind before a GC anyway. 
- (CopySelector::CopySpace(0), self.tospace()), + ( + CopySelector::CopySpace(0), + self.tospace().clone().into_dyn_space(), + ), ], constraints: &GENCOPY_CONSTRAINTS, } @@ -83,7 +87,7 @@ impl Plan for GenCopy { &ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.prepare(tls); if full_heap { @@ -91,37 +95,39 @@ impl Plan for GenCopy { .store(!self.hi.load(Ordering::SeqCst), Ordering::SeqCst); // flip the semi-spaces } let hi = self.hi.load(Ordering::SeqCst); - self.copyspace0.prepare(hi); - self.copyspace1.prepare(!hi); + self.copyspace0.read().prepare(hi); + self.copyspace1.read().prepare(!hi); - self.fromspace_mut() + self.fromspace() + .write() .set_copy_for_sft_trace(Some(CopySemantics::Mature)); - self.tospace_mut().set_copy_for_sft_trace(None); + self.tospace().write().set_copy_for_sft_trace(None); } fn prepare_worker(&self, worker: &mut GCWorker) { - unsafe { worker.get_copy_context_mut().copy[0].assume_init_mut() }.rebind(self.tospace()); + unsafe { worker.get_copy_context_mut().copy[0].assume_init_mut() } + .rebind(self.tospace().clone()); } - fn release(&mut self, tls: VMWorkerThread) { + fn release(&self, tls: VMWorkerThread) { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.release(tls); if full_heap { - self.fromspace().release(); + self.fromspace().read().release(); } } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&self, _tls: VMWorkerThread) { self.gen .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); } fn get_collection_reserved_pages(&self) -> usize { - self.gen.get_collection_reserved_pages() + self.tospace().reserved_pages() + self.gen.get_collection_reserved_pages() + self.tospace().read().reserved_pages() } fn get_used_pages(&self) -> usize { - self.gen.get_used_pages() + self.tospace().reserved_pages() + self.gen.get_used_pages() + self.tospace().read().reserved_pages() } /// Return the number of pages available for allocation. Assuming all future allocations goes to nursery. 
@@ -156,19 +162,19 @@ impl GenerationalPlan for GenCopy { } fn is_object_in_nursery(&self, object: ObjectReference) -> bool { - self.gen.nursery.in_space(object) + self.gen.nursery.read().in_space(object) } fn is_address_in_nursery(&self, addr: Address) -> bool { - self.gen.nursery.address_in_space(addr) + self.gen.nursery.read().address_in_space(addr) } fn get_mature_physical_pages_available(&self) -> usize { - self.tospace().available_physical_pages() + self.tospace().read().available_physical_pages() } fn get_mature_reserved_pages(&self) -> usize { - self.tospace().reserved_pages() + self.tospace().read().reserved_pages() } fn force_full_heap_collection(&self) { @@ -200,14 +206,14 @@ impl GenCopy { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let copyspace0 = CopySpace::new( + let copyspace0 = ArcFlexMut::new(CopySpace::new( plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false, - ); - let copyspace1 = CopySpace::new( + )); + let copyspace1 = ArcFlexMut::new(CopySpace::new( plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true, - ); + )); let res = GenCopy { gen: CommonGenPlan::new(plan_args), @@ -225,7 +231,7 @@ impl GenCopy { self.gen.requires_full_heap_collection(self) } - pub fn tospace(&self) -> &CopySpace { + pub fn tospace(&self) -> &ArcFlexMut> { if self.hi.load(Ordering::SeqCst) { &self.copyspace1 } else { @@ -233,27 +239,11 @@ impl GenCopy { } } - pub fn tospace_mut(&mut self) -> &mut CopySpace { - if self.hi.load(Ordering::SeqCst) { - &mut self.copyspace1 - } else { - &mut self.copyspace0 - } - } - - pub fn fromspace(&self) -> &CopySpace { + pub fn fromspace(&self) -> &ArcFlexMut> { if self.hi.load(Ordering::SeqCst) { &self.copyspace0 } else { &self.copyspace1 } } - - pub fn fromspace_mut(&mut self) -> &mut CopySpace { - if self.hi.load(Ordering::SeqCst) { - &mut self.copyspace0 - } else { - &mut self.copyspace1 - } - } } diff --git a/src/plan/generational/copying/mutator.rs b/src/plan/generational/copying/mutator.rs index 668ccb2cc9..88eb4a1b05 100644 --- a/src/plan/generational/copying/mutator.rs +++ b/src/plan/generational/copying/mutator.rs @@ -34,7 +34,7 @@ pub fn create_gencopy_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new(create_gen_space_mapping( mmtk.get_plan(), - &gencopy.gen.nursery, + gencopy.gen.nursery.clone(), )), prepare_func: &unreachable_prepare_func, release_func: &gencopy_mutator_release, diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index e12430f576..e86e1fa9d4 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -7,6 +7,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::copy::CopySemantics; use crate::util::heap::VMRequest; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::statistics::counter::EventCounter; use crate::util::Address; use crate::util::ObjectReference; @@ -25,7 +26,7 @@ pub struct CommonGenPlan { /// The nursery space. #[space] #[copy_semantics(CopySemantics::PromoteToMature)] - pub nursery: CopySpace, + pub nursery: ArcFlexMut>, /// The common plan. #[parent] pub common: CommonPlan, @@ -53,7 +54,7 @@ impl CommonGenPlan { let common = CommonPlan::new(args); CommonGenPlan { - nursery, + nursery: ArcFlexMut::new(nursery), common, gc_full_heap: AtomicBool::default(), next_gc_full_heap: AtomicBool::new(false), @@ -62,22 +63,22 @@ impl CommonGenPlan { } /// Prepare Gen. 
This should be called by a single thread in GC prepare work. - pub fn prepare(&mut self, tls: VMWorkerThread) { + pub fn prepare(&self, tls: VMWorkerThread) { let full_heap = !self.is_current_gc_nursery(); if full_heap { self.full_heap_gc_count.lock().unwrap().inc(); } self.common.prepare(tls, full_heap); - self.nursery.prepare(true); - self.nursery - .set_copy_for_sft_trace(Some(CopySemantics::PromoteToMature)); + let mut nursery = self.nursery.write(); + nursery.prepare(true); + nursery.set_copy_for_sft_trace(Some(CopySemantics::PromoteToMature)); } /// Release Gen. This should be called by a single thread in GC release work. - pub fn release(&mut self, tls: VMWorkerThread) { + pub fn release(&self, tls: VMWorkerThread) { let full_heap = !self.is_current_gc_nursery(); self.common.release(tls, full_heap); - self.nursery.release(); + self.nursery.write().release(); } /// Independent of how many pages remain in the page budget (a function of heap size), we must @@ -100,7 +101,7 @@ impl CommonGenPlan { space_full: bool, space: Option<&dyn Space>, ) -> bool { - let cur_nursery = self.nursery.reserved_pages(); + let cur_nursery = self.nursery.read().reserved_pages(); let max_nursery = self.common.base.options.get_max_nursery_pages(); let nursery_full = cur_nursery >= max_nursery; trace!( @@ -120,7 +121,7 @@ impl CommonGenPlan { // - if space is none, it is not. Return false immediately. // - if space is some, we further check its descriptor. let is_triggered_by_nursery = space.map_or(false, |s| { - s.common().descriptor == self.nursery.common().descriptor + s.common().descriptor == self.nursery.read().common().descriptor }); // If space is full and the GC is not triggered by nursery, next GC will be full heap GC. if space_full && !is_triggered_by_nursery { @@ -213,13 +214,16 @@ impl CommonGenPlan { object: ObjectReference, worker: &mut GCWorker, ) -> ObjectReference { - if self.nursery.in_space(object) { - return self.nursery.trace_object::( - queue, - object, - Some(CopySemantics::PromoteToMature), - worker, - ); + { + let nursery = self.nursery.read(); + if nursery.in_space(object) { + return nursery.trace_object::( + queue, + object, + Some(CopySemantics::PromoteToMature), + worker, + ); + } } self.common.trace_object::(queue, object, worker) } @@ -232,17 +236,23 @@ impl CommonGenPlan { worker: &mut GCWorker, ) -> ObjectReference { // Evacuate nursery objects - if self.nursery.in_space(object) { - return self.nursery.trace_object::( - queue, - object, - Some(CopySemantics::PromoteToMature), - worker, - ); + { + let nursery = self.nursery.read(); + if nursery.in_space(object) { + return nursery.trace_object::( + queue, + object, + Some(CopySemantics::PromoteToMature), + worker, + ); + } } // We may alloc large object into LOS as nursery objects. Trace them here. - if self.common.get_los().in_space(object) { - return self.common.get_los().trace_object::(queue, object); + { + let los = self.common.los.read(); + if los.in_space(object) { + return los.trace_object::(queue, object); + } } object } @@ -280,13 +290,13 @@ impl CommonGenPlan { /// Get pages reserved for the collection by a generational plan. A generational plan should /// add their own reservation with the value returned by this method. pub fn get_collection_reserved_pages(&self) -> usize { - self.nursery.reserved_pages() + self.nursery.read().reserved_pages() } /// Get pages used by a generational plan. A generational plan should add their own used pages /// with the value returned by this method. 
pub fn get_used_pages(&self) -> usize { - self.nursery.reserved_pages() + self.common.get_used_pages() + self.nursery.read().reserved_pages() + self.common.get_used_pages() } } diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index f1e54cf058..9f1e6c492e 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -18,6 +18,7 @@ use crate::scheduler::GCWorker; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::VMRequest; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -43,7 +44,7 @@ pub struct GenImmix { #[post_scan] #[space] #[copy_semantics(CopySemantics::Mature)] - pub immix_space: ImmixSpace, + pub immix_space: ArcFlexMut>, /// Whether the last GC was a defrag GC for the immix space. pub last_gc_was_defrag: AtomicBool, /// Whether the last GC was a full heap GC @@ -77,7 +78,10 @@ impl Plan for GenImmix { CopySemantics::Mature => CopySelector::ImmixHybrid(0), _ => CopySelector::Unused, }, - space_mapping: vec![(CopySelector::ImmixHybrid(0), &self.immix_space)], + space_mapping: vec![( + CopySelector::ImmixHybrid(0), + self.immix_space.clone().into_dyn_space(), + )], constraints: &GENIMMIX_CONSTRAINTS, } } @@ -111,7 +115,7 @@ impl Plan for GenImmix { GenImmix, GenImmixMatureGCWorkContext, GenImmixMatureGCWorkContext, - >(self, &self.immix_space, scheduler); + >(self, &self.immix_space.read(), scheduler); } } @@ -119,22 +123,20 @@ impl Plan for GenImmix { &super::mutator::ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.prepare(tls); if full_heap { - self.immix_space.prepare( - full_heap, - crate::policy::immix::defrag::StatsForDefrag::new(self), - ); + let stats_for_defrag = crate::policy::immix::defrag::StatsForDefrag::new(self); + self.immix_space.write().prepare(full_heap, stats_for_defrag); } } - fn release(&mut self, tls: VMWorkerThread) { + fn release(&self, tls: VMWorkerThread) { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.release(tls); if full_heap { - let did_defrag = self.immix_space.release(full_heap); + let did_defrag = self.immix_space.write().release(full_heap); self.last_gc_was_defrag.store(did_defrag, Ordering::Relaxed); } else { self.last_gc_was_defrag.store(false, Ordering::Relaxed); @@ -143,17 +145,17 @@ impl Plan for GenImmix { .store(full_heap, Ordering::Relaxed); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&self, _tls: VMWorkerThread) { self.gen .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); } fn get_collection_reserved_pages(&self) -> usize { - self.gen.get_collection_reserved_pages() + self.immix_space.defrag_headroom_pages() + self.gen.get_collection_reserved_pages() + self.immix_space.read().defrag_headroom_pages() } fn get_used_pages(&self) -> usize { - self.gen.get_used_pages() + self.immix_space.reserved_pages() + self.gen.get_used_pages() + self.immix_space.read().reserved_pages() } /// Return the number of pages available for allocation. Assuming all future allocations goes to nursery. 
@@ -188,19 +190,19 @@ impl GenerationalPlan for GenImmix { } fn is_object_in_nursery(&self, object: ObjectReference) -> bool { - self.gen.nursery.in_space(object) + self.gen.nursery.read().in_space(object) } fn is_address_in_nursery(&self, addr: Address) -> bool { - self.gen.nursery.address_in_space(addr) + self.gen.nursery.read().address_in_space(addr) } fn get_mature_physical_pages_available(&self) -> usize { - self.immix_space.available_physical_pages() + self.immix_space.read().available_physical_pages() } fn get_mature_reserved_pages(&self) -> usize { - self.immix_space.reserved_pages() + self.immix_space.read().reserved_pages() } fn force_full_heap_collection(&self) { @@ -231,7 +233,7 @@ impl GenImmix { global_side_metadata_specs: crate::plan::generational::new_generational_global_metadata_specs::(), }; - let immix_space = ImmixSpace::new( + let immix_space = ArcFlexMut::new(ImmixSpace::new( plan_args.get_space_args("immix_mature", true, VMRequest::discontiguous()), ImmixSpaceArgs { reset_log_bit_in_major_gc: false, @@ -241,7 +243,7 @@ impl GenImmix { // In GenImmix, young objects are not allocated in ImmixSpace directly. mixed_age: false, }, - ); + )); let genimmix = GenImmix { gen: CommonGenPlan::new(plan_args), diff --git a/src/plan/generational/immix/mutator.rs b/src/plan/generational/immix/mutator.rs index fdda23cd8b..06417a613d 100644 --- a/src/plan/generational/immix/mutator.rs +++ b/src/plan/generational/immix/mutator.rs @@ -34,7 +34,7 @@ pub fn create_genimmix_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new(create_gen_space_mapping( mmtk.get_plan(), - &genimmix.gen.nursery, + genimmix.gen.nursery.clone(), )), prepare_func: &unreachable_prepare_func, release_func: &genimmix_mutator_release, diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 5623cc8ba8..16268277cf 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -11,6 +11,7 @@ use crate::policy::space::Space; use crate::util::alloc::AllocatorSelector; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::Plan; @@ -85,9 +86,9 @@ lazy_static! { fn create_gen_space_mapping( plan: &'static dyn Plan, - nursery: &'static CopySpace, -) -> Vec<(AllocatorSelector, &'static dyn Space)> { + nursery: ArcFlexMut>, +) -> Vec<(AllocatorSelector, ArcFlexMut>)> { let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, plan); - vec.push((AllocatorSelector::BumpPointer(0), nursery)); + vec.push((AllocatorSelector::BumpPointer(0), nursery.into_dyn_space())); vec } diff --git a/src/plan/global.rs b/src/plan/global.rs index d99017a30f..f672418d7c 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -22,6 +22,7 @@ use crate::util::metadata::side_metadata::SideMetadataSanity; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::options::Options; use crate::util::options::PlanSelector; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::statistics::stats::Stats; use crate::util::{conversions, ObjectReference}; use crate::util::{VMMutatorThread, VMWorkerThread}; @@ -167,7 +168,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Prepare the plan before a GC. This is invoked in an initial step in the GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. 
- fn prepare(&mut self, tls: VMWorkerThread); + fn prepare(&self, tls: VMWorkerThread); /// Prepare a worker for a GC. Each worker has its own prepare method. This hook is for plan-specific /// per-worker preparation. This method is invoked once per worker by the worker thread passed as the argument. @@ -176,11 +177,11 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Release the plan after transitive closure. A plan can implement this method to call each policy's release, /// or create any work packet that should be done in release. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. - fn release(&mut self, tls: VMWorkerThread); + fn release(&self, tls: VMWorkerThread); /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. - fn end_of_gc(&mut self, _tls: VMWorkerThread) {} + fn end_of_gc(&self, _tls: VMWorkerThread) {} fn notify_emergency_collection(&self) { if let Some(gen) = self.generational() { @@ -312,13 +313,13 @@ pub struct BasePlan { // Spaces in base plan #[cfg(feature = "code_space")] #[space] - pub code_space: ImmortalSpace, + pub code_space: ArcFlexMut>, #[cfg(feature = "code_space")] #[space] - pub code_lo_space: ImmortalSpace, + pub code_lo_space: ArcFlexMut>, #[cfg(feature = "ro_space")] #[space] - pub ro_space: ImmortalSpace, + pub ro_space: ArcFlexMut>, /// A VM space is a space allocated and populated by the VM. Currently it is used by JikesRVM /// for boot image. @@ -334,7 +335,7 @@ pub struct BasePlan { /// the VM space. #[cfg(feature = "vm_space")] #[space] - pub vm_space: VMSpace, + pub vm_space: ArcFlexMut>, } /// Args needed for creating any plan. This includes a set of contexts from MMTK or global. This @@ -390,29 +391,29 @@ impl BasePlan { pub fn new(mut args: CreateSpecificPlanArgs) -> BasePlan { BasePlan { #[cfg(feature = "code_space")] - code_space: ImmortalSpace::new(args.get_space_args( + code_space: ArcFlexMut::new(ImmortalSpace::new(args.get_space_args( "code_space", true, VMRequest::discontiguous(), - )), + ))), #[cfg(feature = "code_space")] - code_lo_space: ImmortalSpace::new(args.get_space_args( + code_lo_space: ArcFlexMut::new(ImmortalSpace::new(args.get_space_args( "code_lo_space", true, VMRequest::discontiguous(), - )), + ))), #[cfg(feature = "ro_space")] - ro_space: ImmortalSpace::new(args.get_space_args( + ro_space: ArcFlexMut::new(ImmortalSpace::new(args.get_space_args( "ro_space", true, VMRequest::discontiguous(), - )), + ))), #[cfg(feature = "vm_space")] - vm_space: VMSpace::new(args.get_space_args( + vm_space: ArcFlexMut::new(VMSpace::new(args.get_space_args( "vm_space", false, VMRequest::discontiguous(), - )), + ))), global_state: args.global_args.state.clone(), gc_trigger: args.global_args.gc_trigger, @@ -428,12 +429,12 @@ impl BasePlan { #[cfg(feature = "code_space")] { - pages += self.code_space.reserved_pages(); - pages += self.code_lo_space.reserved_pages(); + pages += self.code_space.read().reserved_pages(); + pages += self.code_lo_space.read().reserved_pages(); } #[cfg(feature = "ro_space")] { - pages += self.ro_space.reserved_pages(); + pages += self.ro_space.read().reserved_pages(); } // If we need to count malloc'd size as part of our heap, we add it here. 
@@ -454,52 +455,52 @@ impl BasePlan { worker: &mut GCWorker, ) -> ObjectReference { #[cfg(feature = "code_space")] - if self.code_space.in_space(object) { + if self.code_space.read().in_space(object) { trace!("trace_object: object in code space"); - return self.code_space.trace_object::(queue, object); + return self.code_space.read().trace_object::(queue, object); } #[cfg(feature = "code_space")] - if self.code_lo_space.in_space(object) { + if self.code_lo_space.read().in_space(object) { trace!("trace_object: object in large code space"); - return self.code_lo_space.trace_object::(queue, object); + return self.code_lo_space.read().trace_object::(queue, object); } #[cfg(feature = "ro_space")] - if self.ro_space.in_space(object) { + if self.ro_space.read().in_space(object) { trace!("trace_object: object in ro_space space"); - return self.ro_space.trace_object(queue, object); + return self.ro_space.read().trace_object(queue, object); } #[cfg(feature = "vm_space")] - if self.vm_space.in_space(object) { + if self.vm_space.read().in_space(object) { trace!("trace_object: object in boot space"); - return self.vm_space.trace_object(queue, object); + return self.vm_space.read().trace_object(queue, object); } VM::VMActivePlan::vm_trace_object::(queue, object, worker) } - pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) { + pub fn prepare(&self, _tls: VMWorkerThread, _full_heap: bool) { #[cfg(feature = "code_space")] - self.code_space.prepare(); + self.code_space.write().prepare(); #[cfg(feature = "code_space")] - self.code_lo_space.prepare(); + self.code_lo_space.write().prepare(); #[cfg(feature = "ro_space")] - self.ro_space.prepare(); + self.ro_space.write().prepare(); #[cfg(feature = "vm_space")] - self.vm_space.prepare(); + self.vm_space.write().prepare(); } - pub fn release(&mut self, _tls: VMWorkerThread, _full_heap: bool) { + pub fn release(&self, _tls: VMWorkerThread, _full_heap: bool) { #[cfg(feature = "code_space")] - self.code_space.release(); + self.code_space.write().release(); #[cfg(feature = "code_space")] - self.code_lo_space.release(); + self.code_lo_space.write().release(); #[cfg(feature = "ro_space")] - self.ro_space.release(); + self.ro_space.write().release(); #[cfg(feature = "vm_space")] - self.vm_space.release(); + self.vm_space.write().release(); } pub(crate) fn collection_required(&self, plan: &P, space_full: bool) -> bool { @@ -539,12 +540,12 @@ CommonPlan is for representing state and features used by _many_ plans, but that #[derive(HasSpaces, PlanTraceObject)] pub struct CommonPlan { #[space] - pub immortal: ImmortalSpace, + pub immortal: ArcFlexMut>, #[space] - pub los: LargeObjectSpace, + pub los: ArcFlexMut>, // TODO: We should use a marksweep space for nonmoving. 
#[space] - pub nonmoving: ImmortalSpace, + pub nonmoving: ArcFlexMut>, #[parent] pub base: BasePlan, } @@ -552,28 +553,28 @@ pub struct CommonPlan { impl CommonPlan { pub fn new(mut args: CreateSpecificPlanArgs) -> CommonPlan { CommonPlan { - immortal: ImmortalSpace::new(args.get_space_args( + immortal: ArcFlexMut::new(ImmortalSpace::new(args.get_space_args( "immortal", true, VMRequest::discontiguous(), - )), - los: LargeObjectSpace::new( + ))), + los: ArcFlexMut::new(LargeObjectSpace::new( args.get_space_args("los", true, VMRequest::discontiguous()), false, - ), - nonmoving: ImmortalSpace::new(args.get_space_args( + )), + nonmoving: ArcFlexMut::new(ImmortalSpace::new(args.get_space_args( "nonmoving", true, VMRequest::discontiguous(), - )), + ))), base: BasePlan::new(args), } } pub fn get_used_pages(&self) -> usize { - self.immortal.reserved_pages() - + self.los.reserved_pages() - + self.nonmoving.reserved_pages() + self.immortal.read().reserved_pages() + + self.los.read().reserved_pages() + + self.nonmoving.read().reserved_pages() + self.base.get_used_pages() } @@ -583,46 +584,61 @@ impl CommonPlan { object: ObjectReference, worker: &mut GCWorker, ) -> ObjectReference { - if self.immortal.in_space(object) { - trace!("trace_object: object in immortal space"); - return self.immortal.trace_object(queue, object); + { + let space = self.immortal.read(); + if space.in_space(object) { + trace!("trace_object: object in immortal space"); + return space.trace_object(queue, object); + } } - if self.los.in_space(object) { - trace!("trace_object: object in los"); - return self.los.trace_object(queue, object); + { + let space = self.los.read(); + if space.in_space(object) { + trace!("trace_object: object in los space"); + return space.trace_object(queue, object); + } } - if self.nonmoving.in_space(object) { - trace!("trace_object: object in nonmoving space"); - return self.nonmoving.trace_object(queue, object); + { + let space = self.nonmoving.read(); + if space.in_space(object) { + trace!("trace_object: object in nonmoving space"); + return space.trace_object(queue, object); + } } self.base.trace_object::(queue, object, worker) } - pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { - self.immortal.prepare(); - self.los.prepare(full_heap); - self.nonmoving.prepare(); + pub fn prepare(&self, tls: VMWorkerThread, full_heap: bool) { + { + let mut space = self.immortal.write(); + space.prepare(); + } + { + let mut space = self.los.write(); + space.prepare(full_heap); + } + { + let mut space = self.nonmoving.write(); + space.prepare(); + } self.base.prepare(tls, full_heap) } - pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) { - self.immortal.release(); - self.los.release(full_heap); - self.nonmoving.release(); + pub fn release(&self, tls: VMWorkerThread, full_heap: bool) { + { + let mut space = self.immortal.write(); + space.release(); + } + { + let mut space = self.los.write(); + space.release(full_heap); + } + { + let mut space = self.nonmoving.write(); + space.release(); + } self.base.release(tls, full_heap) } - - pub fn get_immortal(&self) -> &ImmortalSpace { - &self.immortal - } - - pub fn get_los(&self) -> &LargeObjectSpace { - &self.los - } - - pub fn get_nonmoving(&self) -> &ImmortalSpace { - &self.nonmoving - } } use crate::policy::gc_work::TraceKind; @@ -656,7 +672,7 @@ pub trait HasSpaces { /// /// If `Self` contains nested fields that contain more spaces, this method shall visit spaces /// in the outer struct first. 
- fn for_each_space_mut(&mut self, func: &mut dyn FnMut(&mut dyn Space)); + fn for_each_space_mut(&self, func: &mut dyn FnMut(&mut dyn Space)); } /// A plan that uses `PlanProcessEdges` needs to provide an implementation for this trait. diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 3b1d6dfbd2..a24ebd0268 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -15,6 +15,7 @@ use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::VMBinding; use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; use std::sync::atomic::AtomicBool; @@ -29,7 +30,7 @@ pub struct Immix { #[post_scan] #[space] #[copy_semantics(CopySemantics::DefaultCopy)] - pub immix_space: ImmixSpace, + pub immix_space: ArcFlexMut>, #[parent] pub common: CommonPlan, last_gc_was_defrag: AtomicBool, @@ -66,7 +67,10 @@ impl Plan for Immix { CopySemantics::DefaultCopy => CopySelector::Immix(0), _ => CopySelector::Unused, }, - space_mapping: vec![(CopySelector::Immix(0), &self.immix_space)], + space_mapping: vec![( + CopySelector::Immix(0), + self.immix_space.clone().into_dyn_space(), + )], constraints: &IMMIX_CONSTRAINTS, } } @@ -76,34 +80,34 @@ impl Plan for Immix { Immix, ImmixGCWorkContext, ImmixGCWorkContext, - >(self, &self.immix_space, scheduler) + >(self, &self.immix_space.read(), scheduler) } fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { self.common.prepare(tls, true); - self.immix_space.prepare( - true, - crate::policy::immix::defrag::StatsForDefrag::new(self), - ); + let stats_for_defrag = crate::policy::immix::defrag::StatsForDefrag::new(self); + let mut space = self.immix_space.write(); + space.prepare(true, stats_for_defrag); } - fn release(&mut self, tls: VMWorkerThread) { + fn release(&self, tls: VMWorkerThread) { self.common.release(tls, true); // release the collected region - self.last_gc_was_defrag - .store(self.immix_space.release(true), Ordering::Relaxed); + let mut space = self.immix_space.write(); + let defrag_gc = space.release(true); + self.last_gc_was_defrag.store(defrag_gc, Ordering::Relaxed); } fn get_collection_reserved_pages(&self) -> usize { - self.immix_space.defrag_headroom_pages() + self.immix_space.read().defrag_headroom_pages() } fn get_used_pages(&self) -> usize { - self.immix_space.reserved_pages() + self.common.get_used_pages() + self.immix_space.read().reserved_pages() + self.common.get_used_pages() } fn base(&self) -> &BasePlan { @@ -141,10 +145,10 @@ impl Immix { space_args: ImmixSpaceArgs, ) -> Self { let immix = Immix { - immix_space: ImmixSpace::new( + immix_space: ArcFlexMut::new(ImmixSpace::new( plan_args.get_space_args("immix", true, VMRequest::discontiguous()), space_args, - ), + )), common: CommonPlan::new(plan_args), last_gc_was_defrag: AtomicBool::new(false), }; diff --git a/src/plan/immix/mutator.rs b/src/plan/immix/mutator.rs index 0df443c9cd..b662423bcc 100644 --- a/src/plan/immix/mutator.rs +++ b/src/plan/immix/mutator.rs @@ -49,7 +49,10 @@ pub fn create_immix_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new({ let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, immix); - vec.push((AllocatorSelector::Immix(0), &immix.immix_space)); + vec.push(( + AllocatorSelector::Immix(0), + 
immix.immix_space.clone().into_dyn_space(), + )); vec }), prepare_func: &unreachable_prepare_func, diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs index 4b60cafa9b..1bf3c17eec 100644 --- a/src/plan/markcompact/gc_work.rs +++ b/src/plan/markcompact/gc_work.rs @@ -6,6 +6,7 @@ use crate::scheduler::gc_work::*; use crate::scheduler::GCWork; use crate::scheduler::GCWorker; use crate::scheduler::WorkBucketStage; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::ActivePlan; use crate::vm::Scanning; use crate::vm::VMBinding; @@ -14,17 +15,17 @@ use std::marker::PhantomData; /// iterate through the heap and calculate the new location of live objects pub struct CalculateForwardingAddress { - mc_space: &'static MarkCompactSpace, + mc_space: ArcFlexMut>, } impl GCWork for CalculateForwardingAddress { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { - self.mc_space.calculate_forwarding_pointer(); + self.mc_space.read().calculate_forwarding_pointer(); } } impl CalculateForwardingAddress { - pub fn new(mc_space: &'static MarkCompactSpace) -> Self { + pub fn new(mc_space: ArcFlexMut>) -> Self { Self { mc_space } } } @@ -78,17 +79,17 @@ impl UpdateReferences { /// compact live objects based on forwarding pointers calculated before pub struct Compact { - mc_space: &'static MarkCompactSpace, + mc_space: ArcFlexMut>, } impl GCWork for Compact { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { - self.mc_space.compact(); + self.mc_space.read().compact(); } } impl Compact { - pub fn new(mc_space: &'static MarkCompactSpace) -> Self { + pub fn new(mc_space: ArcFlexMut>) -> Self { Self { mc_space } } } diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index cd01b86df5..93be511274 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -20,6 +20,7 @@ use crate::util::metadata::side_metadata::SideMetadataContext; #[cfg(not(feature = "vo_bit"))] use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::VMBinding; use enum_map::EnumMap; @@ -30,7 +31,7 @@ use mmtk_macros::{HasSpaces, PlanTraceObject}; pub struct MarkCompact { #[space] #[copy_semantics(CopySemantics::DefaultCopy)] - pub mc_space: MarkCompactSpace, + pub mc_space: ArcFlexMut>, #[parent] pub common: CommonPlan, } @@ -64,14 +65,14 @@ impl Plan for MarkCompact { &self.common } - fn prepare(&mut self, _tls: VMWorkerThread) { + fn prepare(&self, _tls: VMWorkerThread) { self.common.prepare(_tls, true); - self.mc_space.prepare(); + self.mc_space.read().prepare(); } - fn release(&mut self, _tls: VMWorkerThread) { + fn release(&self, _tls: VMWorkerThread) { self.common.release(_tls, true); - self.mc_space.release(); + self.mc_space.read().release(); } fn get_allocator_mapping(&self) -> &'static EnumMap { @@ -96,10 +97,11 @@ impl Plan for MarkCompact { .add(Prepare::>::new(self)); scheduler.work_buckets[WorkBucketStage::CalculateForwarding] - .add(CalculateForwardingAddress::::new(&self.mc_space)); + .add(CalculateForwardingAddress::::new(self.mc_space.clone())); // do another trace to update references scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::::new(self)); - scheduler.work_buckets[WorkBucketStage::Compact].add(Compact::::new(&self.mc_space)); + scheduler.work_buckets[WorkBucketStage::Compact] + .add(Compact::::new(self.mc_space.clone())); // Release global/collectors/mutators 
scheduler.work_buckets[WorkBucketStage::Release] @@ -166,7 +168,7 @@ impl Plan for MarkCompact { } fn get_used_pages(&self) -> usize { - self.mc_space.reserved_pages() + self.common.get_used_pages() + self.mc_space.read().reserved_pages() + self.common.get_used_pages() } fn get_collection_reserved_pages(&self) -> usize { @@ -192,8 +194,11 @@ impl MarkCompact { global_side_metadata_specs, }; - let mc_space = - MarkCompactSpace::new(plan_args.get_space_args("mc", true, VMRequest::discontiguous())); + let mc_space = ArcFlexMut::new(MarkCompactSpace::new(plan_args.get_space_args( + "mc", + true, + VMRequest::discontiguous(), + ))); let res = MarkCompact { mc_space, @@ -207,7 +212,7 @@ impl MarkCompact { } impl MarkCompact { - pub fn mc_space(&self) -> &MarkCompactSpace { + pub fn mc_space(&self) -> &ArcFlexMut> { &self.mc_space } } diff --git a/src/plan/markcompact/mutator.rs b/src/plan/markcompact/mutator.rs index fc42d7ea7e..3285174a0a 100644 --- a/src/plan/markcompact/mutator.rs +++ b/src/plan/markcompact/mutator.rs @@ -36,7 +36,10 @@ pub fn create_markcompact_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new({ let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, markcompact); - vec.push((AllocatorSelector::MarkCompact(0), markcompact.mc_space())); + vec.push(( + AllocatorSelector::MarkCompact(0), + markcompact.mc_space().clone().into_dyn_space(), + )); vec }), prepare_func: &unreachable_prepare_func, diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 5e127d9d8c..3b380a76d9 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -12,6 +12,7 @@ use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::VMWorkerThread; use crate::vm::VMBinding; use enum_map::EnumMap; @@ -32,7 +33,7 @@ pub struct MarkSweep { #[parent] common: CommonPlan, #[space] - ms: MarkSweepSpace, + ms: ArcFlexMut>, } pub const MS_CONSTRAINTS: PlanConstraints = PlanConstraints { @@ -56,13 +57,13 @@ impl Plan for MarkSweep { &ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { self.common.prepare(tls, true); - self.ms.prepare(); + self.ms.write().prepare(); } - fn release(&mut self, tls: VMWorkerThread) { - self.ms.release(); + fn release(&self, tls: VMWorkerThread) { + self.ms.write().release(); self.common.release(tls, true); } @@ -71,7 +72,7 @@ impl Plan for MarkSweep { } fn get_used_pages(&self) -> usize { - self.common.get_used_pages() + self.ms.reserved_pages() + self.common.get_used_pages() + self.ms.read().reserved_pages() } fn base(&self) -> &BasePlan { @@ -103,11 +104,11 @@ impl MarkSweep { }; let res = MarkSweep { - ms: MarkSweepSpace::new(plan_args.get_space_args( + ms: ArcFlexMut::new(MarkSweepSpace::new(plan_args.get_space_args( "ms", true, VMRequest::discontiguous(), - )), + ))), common: CommonPlan::new(plan_args), }; @@ -116,7 +117,7 @@ impl MarkSweep { res } - pub fn ms_space(&self) -> &MarkSweepSpace { + pub fn ms_space(&self) -> &ArcFlexMut> { &self.ms } } diff --git a/src/plan/marksweep/mutator.rs b/src/plan/marksweep/mutator.rs index d93431b0fa..7cfe01fd09 100644 --- a/src/plan/marksweep/mutator.rs +++ b/src/plan/marksweep/mutator.rs @@ -43,7 +43,10 @@ mod malloc_mark_sweep { Box::new({ let mut vec = 
crate::plan::mutator_context::create_space_mapping(RESERVED_ALLOCATORS, true, plan); - vec.push((AllocatorSelector::Malloc(0), ms.ms_space())); + vec.push(( + AllocatorSelector::Malloc(0), + ms.ms_space().clone().into_dyn_space(), + )); vec }) } @@ -98,7 +101,10 @@ mod native_mark_sweep { Box::new({ let mut vec = crate::plan::mutator_context::create_space_mapping(RESERVED_ALLOCATORS, true, plan); - vec.push((AllocatorSelector::FreeList(0), ms.ms_space())); + vec.push(( + AllocatorSelector::FreeList(0), + ms.ms_space().clone().into_dyn_space(), + )); vec }) } diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 9ed1c877e6..03867a0f64 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -6,13 +6,14 @@ use crate::plan::AllocationSemantics; use crate::policy::space::Space; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; use crate::util::alloc::Allocator; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::{Address, ObjectReference}; use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use enum_map::EnumMap; -pub(crate) type SpaceMapping = Vec<(AllocatorSelector, &'static dyn Space)>; +pub(crate) type SpaceMapping = Vec<(AllocatorSelector, ArcFlexMut>)>; /// A place-holder implementation for `MutatorConfig::prepare_func` that should not be called. /// It is the most often used by plans that sets `PlanConstraints::needs_prepare_mutator` to @@ -57,13 +58,13 @@ impl std::fmt::Debug for MutatorConfig { f.write_str("MutatorConfig:\n")?; f.write_str("Semantics mapping:\n")?; for (semantic, selector) in self.allocator_mapping.iter() { - let space_name: &str = match self + let space_name = match self .space_mapping .iter() .find(|(selector_to_find, _)| selector_to_find == selector) { - Some((_, space)) => space.name(), - None => "!!!missing space here!!!", + Some((_, space)) => space.read().name().to_owned(), + None => "!!!missing space here!!!".to_string(), }; f.write_fmt(format_args!( "- {:?} = {:?} ({:?})\n", @@ -72,7 +73,11 @@ impl std::fmt::Debug for MutatorConfig { } f.write_str("Space mapping:\n")?; for (selector, space) in self.space_mapping.iter() { - f.write_fmt(format_args!("- {:?} = {:?}\n", selector, space.name()))?; + f.write_fmt(format_args!( + "- {:?} = {:?}\n", + selector, + space.read().name() + ))?; } Ok(()) } @@ -138,14 +143,10 @@ impl MutatorContext for Mutator { &mut self, refer: ObjectReference, _bytes: usize, - allocator: AllocationSemantics, + _allocator: AllocationSemantics, ) { - unsafe { - self.allocators - .get_allocator_mut(self.config.allocator_mapping[allocator]) - } - .get_space() - .initialize_object_metadata(refer, true) + unsafe { crate::mmtk::SFT_MAP.get_unchecked(refer.to_address::()) } + .initialize_object_metadata(refer, true) } fn get_tls(&self) -> VMMutatorThread { @@ -419,12 +420,12 @@ pub(crate) fn create_space_mapping( mut reserved: ReservedAllocators, include_common_plan: bool, plan: &'static dyn Plan, -) -> Vec<(AllocatorSelector, &'static dyn Space)> { +) -> Vec<(AllocatorSelector, ArcFlexMut>)> { // If we need to add new allocators, or new spaces, we need to make sure the allocator we assign here matches the allocator // we used in create_space_mapping(). The easiest way is to add the space/allocator mapping in the same order. So for any modification to this // function, please check the other function. 
- let mut vec: Vec<(AllocatorSelector, &'static dyn Space)> = vec![]; + let mut vec: Vec<(AllocatorSelector, ArcFlexMut>)> = vec![]; // spaces in BasePlan @@ -432,12 +433,12 @@ pub(crate) fn create_space_mapping( { vec.push(( AllocatorSelector::BumpPointer(reserved.n_bump_pointer), - &plan.base().code_space, + plan.base().code_space.clone().into_dyn_space(), )); reserved.n_bump_pointer += 1; vec.push(( AllocatorSelector::BumpPointer(reserved.n_bump_pointer), - &plan.base().code_lo_space, + plan.base().code_lo_space.clone().into_dyn_space(), )); reserved.n_bump_pointer += 1; } @@ -446,7 +447,7 @@ pub(crate) fn create_space_mapping( { vec.push(( AllocatorSelector::BumpPointer(reserved.n_bump_pointer), - &plan.base().ro_space, + plan.base().ro_space.clone().into_dyn_space(), )); reserved.n_bump_pointer += 1; } @@ -456,18 +457,18 @@ pub(crate) fn create_space_mapping( if include_common_plan { vec.push(( AllocatorSelector::BumpPointer(reserved.n_bump_pointer), - plan.common().get_immortal(), + plan.common().immortal.clone().into_dyn_space(), )); reserved.n_bump_pointer += 1; vec.push(( AllocatorSelector::LargeObject(reserved.n_large_object), - plan.common().get_los(), + plan.common().los.clone().into_dyn_space(), )); reserved.n_large_object += 1; // TODO: This should be freelist allocator once we use marksweep for nonmoving space. vec.push(( AllocatorSelector::BumpPointer(reserved.n_bump_pointer), - plan.common().get_nonmoving(), + plan.common().nonmoving.clone().into_dyn_space(), )); reserved.n_bump_pointer += 1; } diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index 8e013135ed..3bb550bc8e 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -13,6 +13,7 @@ use crate::util::alloc::allocators::AllocatorSelector; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::VMBinding; use enum_map::EnumMap; use mmtk_macros::HasSpaces; @@ -27,11 +28,11 @@ pub struct NoGC { #[parent] pub base: BasePlan, #[space] - pub nogc_space: NoGCImmortalSpace, + pub nogc_space: ArcFlexMut>, #[space] - pub immortal: ImmortalSpace, + pub immortal: ArcFlexMut>, #[space] - pub los: ImmortalSpace, + pub los: ArcFlexMut>, } pub const NOGC_CONSTRAINTS: PlanConstraints = PlanConstraints { @@ -56,11 +57,11 @@ impl Plan for NoGC { &mut self.base } - fn prepare(&mut self, _tls: VMWorkerThread) { + fn prepare(&self, _tls: VMWorkerThread) { unreachable!() } - fn release(&mut self, _tls: VMWorkerThread) { + fn release(&self, _tls: VMWorkerThread) { unreachable!() } @@ -73,9 +74,9 @@ impl Plan for NoGC { } fn get_used_pages(&self) -> usize { - self.nogc_space.reserved_pages() - + self.immortal.reserved_pages() - + self.los.reserved_pages() + self.nogc_space.read().reserved_pages() + + self.immortal.read().reserved_pages() + + self.los.read().reserved_pages() + self.base.get_used_pages() } } @@ -89,21 +90,21 @@ impl NoGC { }; let res = NoGC { - nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( + nogc_space: ArcFlexMut::new(NoGCImmortalSpace::new(plan_args.get_space_args( "nogc_space", cfg!(not(feature = "nogc_no_zeroing")), VMRequest::discontiguous(), - )), - immortal: ImmortalSpace::new(plan_args.get_space_args( + ))), + immortal: ArcFlexMut::new(ImmortalSpace::new(plan_args.get_space_args( "immortal", true, VMRequest::discontiguous(), - )), - los: ImmortalSpace::new(plan_args.get_space_args( + ))), + los: 
ArcFlexMut::new(ImmortalSpace::new(plan_args.get_space_args( "los", true, VMRequest::discontiguous(), - )), + ))), base: BasePlan::new(plan_args), }; diff --git a/src/plan/nogc/mutator.rs b/src/plan/nogc/mutator.rs index 863384a4f8..a66ece4cb1 100644 --- a/src/plan/nogc/mutator.rs +++ b/src/plan/nogc/mutator.rs @@ -47,9 +47,18 @@ pub fn create_nogc_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new({ let mut vec = create_space_mapping(MULTI_SPACE_RESERVED_ALLOCATORS, false, plan); - vec.push((AllocatorSelector::BumpPointer(0), &plan.nogc_space)); - vec.push((AllocatorSelector::BumpPointer(1), &plan.immortal)); - vec.push((AllocatorSelector::BumpPointer(2), &plan.los)); + vec.push(( + AllocatorSelector::BumpPointer(0), + plan.nogc_space.clone().into_dyn_space(), + )); + vec.push(( + AllocatorSelector::BumpPointer(1), + plan.immortal.clone().into_dyn_space(), + )); + vec.push(( + AllocatorSelector::BumpPointer(2), + plan.los.clone().into_dyn_space(), + )); vec }), prepare_func: &unreachable_prepare_func, diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 44e25e2202..38ec423575 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -10,6 +10,7 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::{plan::global::BasePlan, vm::VMBinding}; use crate::{ plan::global::CommonPlan, policy::largeobjectspace::LargeObjectSpace, @@ -22,7 +23,7 @@ use mmtk_macros::{HasSpaces, PlanTraceObject}; #[derive(HasSpaces, PlanTraceObject)] pub struct PageProtect { #[space] - pub space: LargeObjectSpace, + pub space: ArcFlexMut>, #[parent] pub common: CommonPlan, } @@ -46,14 +47,14 @@ impl Plan for PageProtect { &ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { self.common.prepare(tls, true); - self.space.prepare(true); + self.space.write().prepare(true); } - fn release(&mut self, tls: VMWorkerThread) { + fn release(&self, tls: VMWorkerThread) { self.common.release(tls, true); - self.space.release(true); + self.space.write().release(true); } fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool { @@ -61,7 +62,7 @@ impl Plan for PageProtect { } fn get_used_pages(&self) -> usize { - self.space.reserved_pages() + self.common.get_used_pages() + self.space.read().reserved_pages() + self.common.get_used_pages() } fn base(&self) -> &BasePlan { @@ -98,10 +99,10 @@ impl PageProtect { }; let ret = PageProtect { - space: LargeObjectSpace::new( + space: ArcFlexMut::new(LargeObjectSpace::new( plan_args.get_space_args("pageprotect", true, VMRequest::discontiguous()), true, - ), + )), common: CommonPlan::new(plan_args), }; diff --git a/src/plan/pageprotect/mutator.rs b/src/plan/pageprotect/mutator.rs index 10daf792a9..e4ac93e5c7 100644 --- a/src/plan/pageprotect/mutator.rs +++ b/src/plan/pageprotect/mutator.rs @@ -37,7 +37,10 @@ pub fn create_pp_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new({ let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, page); - vec.push((AllocatorSelector::LargeObject(0), &page.space)); + vec.push(( + AllocatorSelector::LargeObject(0), + page.space.clone().into_dyn_space(), + )); vec }), prepare_func: &unreachable_prepare_func, diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 
eaa7147c78..665ad95c5a 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -14,6 +14,7 @@ use crate::util::copy::*; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::VMWorkerThread; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::{plan::global::BasePlan, vm::VMBinding}; use std::sync::atomic::{AtomicBool, Ordering}; @@ -26,10 +27,10 @@ pub struct SemiSpace { pub hi: AtomicBool, #[space] #[copy_semantics(CopySemantics::DefaultCopy)] - pub copyspace0: CopySpace, + pub copyspace0: ArcFlexMut>, #[space] #[copy_semantics(CopySemantics::DefaultCopy)] - pub copyspace1: CopySpace, + pub copyspace1: ArcFlexMut>, #[parent] pub common: CommonPlan, } @@ -59,7 +60,10 @@ impl Plan for SemiSpace { }, space_mapping: vec![ // // The tospace argument doesn't matter, we will rebind before a GC anyway. - (CopySelector::CopySpace(0), &self.copyspace0), + ( + CopySelector::CopySpace(0), + self.copyspace0.clone().into_dyn_space(), + ), ], constraints: &SS_CONSTRAINTS, } @@ -73,28 +77,30 @@ impl Plan for SemiSpace { &ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: VMWorkerThread) { + fn prepare(&self, tls: VMWorkerThread) { self.common.prepare(tls, true); self.hi .store(!self.hi.load(Ordering::SeqCst), Ordering::SeqCst); // flip the semi-spaces // prepare each of the collected regions let hi = self.hi.load(Ordering::SeqCst); - self.copyspace0.prepare(hi); - self.copyspace1.prepare(!hi); - self.fromspace_mut() + self.copyspace0.read().prepare(hi); + self.copyspace1.read().prepare(!hi); + self.fromspace() + .write() .set_copy_for_sft_trace(Some(CopySemantics::DefaultCopy)); - self.tospace_mut().set_copy_for_sft_trace(None); + self.tospace().write().set_copy_for_sft_trace(None); } fn prepare_worker(&self, worker: &mut GCWorker) { - unsafe { worker.get_copy_context_mut().copy[0].assume_init_mut() }.rebind(self.tospace()); + unsafe { worker.get_copy_context_mut().copy[0].assume_init_mut() } + .rebind(self.tospace().clone()); } - fn release(&mut self, tls: VMWorkerThread) { + fn release(&self, tls: VMWorkerThread) { self.common.release(tls, true); // release the collected region - self.fromspace().release(); + self.fromspace().read().release(); } fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool { @@ -102,11 +108,11 @@ impl Plan for SemiSpace { } fn get_collection_reserved_pages(&self) -> usize { - self.tospace().reserved_pages() + self.tospace().read().reserved_pages() } fn get_used_pages(&self) -> usize { - self.tospace().reserved_pages() + self.common.get_used_pages() + self.tospace().read().reserved_pages() + self.common.get_used_pages() } fn get_available_pages(&self) -> usize { @@ -139,14 +145,14 @@ impl SemiSpace { let res = SemiSpace { hi: AtomicBool::new(false), - copyspace0: CopySpace::new( + copyspace0: ArcFlexMut::new(CopySpace::new( plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false, - ), - copyspace1: CopySpace::new( + )), + copyspace1: ArcFlexMut::new(CopySpace::new( plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true, - ), + )), common: CommonPlan::new(plan_args), }; @@ -155,7 +161,7 @@ impl SemiSpace { res } - pub fn tospace(&self) -> &CopySpace { + pub fn tospace(&self) -> &ArcFlexMut> { if self.hi.load(Ordering::SeqCst) { &self.copyspace1 } else { @@ -163,27 +169,11 @@ impl SemiSpace { } } - pub fn tospace_mut(&mut self) -> &mut CopySpace { - if self.hi.load(Ordering::SeqCst) { - &mut 
self.copyspace1 - } else { - &mut self.copyspace0 - } - } - - pub fn fromspace(&self) -> &CopySpace { + pub fn fromspace(&self) -> &ArcFlexMut> { if self.hi.load(Ordering::SeqCst) { &self.copyspace0 } else { &self.copyspace1 } } - - pub fn fromspace_mut(&mut self) -> &mut CopySpace { - if self.hi.load(Ordering::SeqCst) { - &mut self.copyspace0 - } else { - &mut self.copyspace1 - } - } } diff --git a/src/plan/semispace/mutator.rs b/src/plan/semispace/mutator.rs index 2fe6474518..48dd37bdfd 100644 --- a/src/plan/semispace/mutator.rs +++ b/src/plan/semispace/mutator.rs @@ -28,7 +28,9 @@ pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWork .plan .downcast_ref::>() .unwrap() - .tospace(), + .tospace() + .clone() + .into_dyn_space(), ); } @@ -54,7 +56,10 @@ pub fn create_ss_mutator( allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new({ let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, ss); - vec.push((AllocatorSelector::BumpPointer(0), ss.tospace())); + vec.push(( + AllocatorSelector::BumpPointer(0), + ss.tospace().clone().into_dyn_space(), + )); vec }), prepare_func: &unreachable_prepare_func, diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index a97fb8e98d..c1458b97ef 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -11,6 +11,7 @@ use crate::util::copy::CopyConfig; use crate::util::copy::CopySelector; use crate::util::copy::CopySemantics; use crate::util::metadata::side_metadata::SideMetadataContext; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::statistics::counter::EventCounter; use crate::vm::ObjectModel; use crate::vm::VMBinding; @@ -55,7 +56,10 @@ impl Plan for StickyImmix { CopySemantics::DefaultCopy => CopySelector::Immix(0), _ => CopySelector::Unused, }, - space_mapping: vec![(CopySelector::Immix(0), &self.immix.immix_space)], + space_mapping: vec![( + CopySelector::Immix(0), + self.immix.immix_space.clone().into_dyn_space(), + )], constraints: &STICKY_IMMIX_CONSTRAINTS, } } @@ -94,7 +98,7 @@ impl Plan for StickyImmix { StickyImmix, StickyImmixMatureGCWorkContext, StickyImmixMatureGCWorkContext, - >(self, &self.immix.immix_space, scheduler); + >(self, &self.immix.immix_space.read(), scheduler); } } @@ -105,32 +109,30 @@ impl Plan for StickyImmix { &super::mutator::ALLOCATOR_MAPPING } - fn prepare(&mut self, tls: crate::util::VMWorkerThread) { + fn prepare(&self, tls: crate::util::VMWorkerThread) { if self.is_current_gc_nursery() { // Prepare both large object space and immix space - self.immix.immix_space.prepare( - false, - crate::policy::immix::defrag::StatsForDefrag::new(self), - ); - self.immix.common.los.prepare(false); + let stats_for_defrag = crate::policy::immix::defrag::StatsForDefrag::new(self); + self.immix.immix_space.write().prepare(false, stats_for_defrag); + self.immix.common.los.write().prepare(false); } else { self.full_heap_gc_count.lock().unwrap().inc(); self.immix.prepare(tls); } } - fn release(&mut self, tls: crate::util::VMWorkerThread) { + fn release(&self, tls: crate::util::VMWorkerThread) { if self.is_current_gc_nursery() { - let was_defrag = self.immix.immix_space.release(false); + let was_defrag = self.immix.immix_space.write().release(false); self.immix .set_last_gc_was_defrag(was_defrag, Ordering::Relaxed); - self.immix.common.los.release(false); + self.immix.common.los.write().release(false); } else { self.immix.release(tls); } } - fn end_of_gc(&mut self, _tls: crate::util::opaque_pointer::VMWorkerThread) { + fn end_of_gc(&self, _tls: 
crate::util::opaque_pointer::VMWorkerThread) { let next_gc_full_heap = crate::plan::generational::global::CommonGenPlan::should_next_gc_be_full_heap(self); self.next_gc_full_heap @@ -142,9 +144,12 @@ impl Plan for StickyImmix { space_full: bool, space: Option<&dyn crate::policy::space::Space>, ) -> bool { - let nursery_full = - self.immix.immix_space.get_pages_allocated() > self.options().get_max_nursery_pages(); - if space_full && space.is_some() && space.unwrap().name() != self.immix.immix_space.name() { + let nursery_full = self.immix.immix_space.read().get_pages_allocated() + > self.options().get_max_nursery_pages(); + if space_full + && space.is_some() + && space.unwrap().name() != self.immix.immix_space.read().name() + { self.next_gc_full_heap.store(true, Ordering::SeqCst); } self.immix.collection_required(space_full, space) || nursery_full @@ -171,15 +176,16 @@ impl Plan for StickyImmix { } // Every reachable object should be marked - if self.immix.immix_space.in_space(object) && !self.immix.immix_space.is_marked(object) + if self.immix.immix_space.read().in_space(object) + && !self.immix.immix_space.read().is_marked(object) { error!( "Object {} is not marked (all objects that have been traced should be marked)", object ); return false; - } else if self.immix.common.los.in_space(object) - && !self.immix.common.los.is_live(object) + } else if self.immix.common.los.read().in_space(object) + && !self.immix.common.los.read().is_live(object) { error!("LOS Object {} is not marked", object); return false; @@ -195,7 +201,8 @@ impl GenerationalPlan for StickyImmix { } fn is_object_in_nursery(&self, object: crate::util::ObjectReference) -> bool { - self.immix.immix_space.in_space(object) && !self.immix.immix_space.is_marked(object) + self.immix.immix_space.read().in_space(object) + && !self.immix.immix_space.read().is_marked(object) } // This check is used for memory slice copying barrier, where we only know addresses instead of objects. @@ -209,11 +216,11 @@ impl GenerationalPlan for StickyImmix { } fn get_mature_physical_pages_available(&self) -> usize { - self.immix.immix_space.available_physical_pages() + self.immix.immix_space.read().available_physical_pages() } fn get_mature_reserved_pages(&self) -> usize { - self.immix.immix_space.reserved_pages() + self.immix.immix_space.read().reserved_pages() } fn force_full_heap_collection(&self) { @@ -232,22 +239,26 @@ impl crate::plan::generational::global::GenerationalPlanExt f object: crate::util::ObjectReference, worker: &mut crate::scheduler::GCWorker, ) -> crate::util::ObjectReference { - if self.immix.immix_space.in_space(object) { + if self.immix.immix_space.read().in_space(object) { if !self.is_object_in_nursery(object) { // Mature object trace!("Immix mature object {}, skip", object); return object; } else { let object = if crate::policy::immix::PREFER_COPY_ON_NURSERY_GC { - let ret = self.immix.immix_space.trace_object_with_opportunistic_copy( - queue, - object, - // We just use default copy here. We have set args for ImmixSpace to deal with unlogged bit, - // and we do not need to use CopySemantics::PromoteToMature. - CopySemantics::DefaultCopy, - worker, - true, - ); + let ret = self + .immix + .immix_space + .read() + .trace_object_with_opportunistic_copy( + queue, + object, + // We just use default copy here. We have set args for ImmixSpace to deal with unlogged bit, + // and we do not need to use CopySemantics::PromoteToMature. 
+ CopySemantics::DefaultCopy, + worker, + true, + ); trace!( "Immix nursery object {} is being traced with opportunistic copy {}", object, @@ -265,6 +276,7 @@ impl crate::plan::generational::global::GenerationalPlanExt f ); self.immix .immix_space + .read() .trace_object_without_moving(queue, object) }; @@ -272,11 +284,12 @@ impl crate::plan::generational::global::GenerationalPlanExt f } } - if self.immix.common().get_los().in_space(object) { + if self.immix.common().los.read().in_space(object) { return self .immix .common() - .get_los() + .los + .read() .trace_object::(queue, object); } @@ -348,7 +361,7 @@ impl StickyImmix { } } - pub fn get_immix_space(&self) -> &ImmixSpace { + pub fn get_immix_space(&self) -> &ArcFlexMut> { &self.immix.immix_space } } diff --git a/src/plan/sticky/immix/mutator.rs b/src/plan/sticky/immix/mutator.rs index 4957872d6a..11a6860e8a 100644 --- a/src/plan/sticky/immix/mutator.rs +++ b/src/plan/sticky/immix/mutator.rs @@ -26,7 +26,10 @@ pub fn create_stickyimmix_mutator( space_mapping: Box::new({ let mut vec = create_space_mapping(immix::mutator::RESERVED_ALLOCATORS, true, mmtk.get_plan()); - vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space())); + vec.push(( + AllocatorSelector::Immix(0), + stickyimmix.get_immix_space().clone().into_dyn_space(), + )); vec }), prepare_func: &unreachable_prepare_func, diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 8d08ec1507..815d2db7e6 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -301,6 +301,8 @@ use crate::util::alloc::Allocator; use crate::util::alloc::BumpAllocator; use crate::util::opaque_pointer::VMWorkerThread; +use crate::util::rust_util::flex_mut::ArcFlexMut; + /// Copy allocator for CopySpace pub struct CopySpaceCopyContext { copy_allocator: BumpAllocator, @@ -328,17 +330,16 @@ impl CopySpaceCopyContext { pub(crate) fn new( tls: VMWorkerThread, context: Arc>, - tospace: &'static CopySpace, + tospace: ArcFlexMut>, ) -> Self { CopySpaceCopyContext { - copy_allocator: BumpAllocator::new(tls.0, tospace, context), + copy_allocator: BumpAllocator::new(tls.0, tospace.into_dyn_space(), context), } } } impl CopySpaceCopyContext { - pub fn rebind(&mut self, space: &CopySpace) { - self.copy_allocator - .rebind(unsafe { &*{ space as *const _ } }); + pub fn rebind(&mut self, space: ArcFlexMut>) { + self.copy_allocator.rebind(space.into_dyn_space()); } } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 3809f7bd24..ba8e0651c6 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -6,7 +6,7 @@ use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN}; use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::sft_map::SFTMap; -use crate::policy::space::{CommonSpace, Space}; +use crate::policy::space::{CommonSpace, Space, SpaceAllocFail}; use crate::util::alloc::allocator::AllocatorContext; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::copy::*; @@ -19,6 +19,7 @@ use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::metadata::vo_bit; use crate::util::metadata::{self, MetadataSpec}; use crate::util::object_forwarding; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::{Address, ObjectReference}; use crate::vm::*; use crate::{ @@ -493,18 +494,17 @@ impl ImmixSpace { } /// Allocate a clean block. 
- pub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Option { - let block_address = self.acquire(tls, Block::PAGES); - if block_address.is_zero() { - return None; - } - self.defrag.notify_new_clean_block(copy); - let block = Block::from_aligned_address(block_address); - block.init(copy); - self.chunk_map.set(block.chunk(), ChunkState::Allocated); - self.lines_consumed - .fetch_add(Block::LINES, Ordering::SeqCst); - Some(block) + pub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Result { + let alloc_res = self.acquire(tls, Block::PAGES); + alloc_res.map(|block_address| { + self.defrag.notify_new_clean_block(copy); + let block = Block::from_aligned_address(block_address); + block.init(copy); + self.chunk_map.set(block.chunk(), ChunkState::Allocated); + self.lines_consumed + .fetch_add(Block::LINES, Ordering::SeqCst); + block + }) } /// Pop a reusable block from the reusable block list. @@ -962,7 +962,7 @@ impl PolicyCopyContext for ImmixCopyContext { self.allocator.alloc(bytes, align, offset) } fn post_copy(&mut self, obj: ObjectReference, bytes: usize) { - self.get_space().post_copy(obj, bytes) + self.allocator.space.read().post_copy(obj, bytes) } } @@ -970,16 +970,12 @@ impl ImmixCopyContext { pub(crate) fn new( tls: VMWorkerThread, context: Arc>, - space: &'static ImmixSpace, + space: ArcFlexMut>, ) -> Self { ImmixCopyContext { - allocator: ImmixAllocator::new(tls.0, Some(space), context, true), + allocator: ImmixAllocator::new(tls.0, space, context, true), } } - - fn get_space(&self) -> &ImmixSpace { - self.allocator.immix_space() - } } /// Hybrid Immix copy context. It includes two different immix allocators. One with `copy = true` @@ -1008,14 +1004,14 @@ impl PolicyCopyContext for ImmixHybridCopyContext { align: usize, offset: usize, ) -> Address { - if self.get_space().in_defrag() { + if self.get_space().read().in_defrag() { self.defrag_allocator.alloc(bytes, align, offset) } else { self.copy_allocator.alloc(bytes, align, offset) } } fn post_copy(&mut self, obj: ObjectReference, bytes: usize) { - self.get_space().post_copy(obj, bytes) + self.get_space().read().post_copy(obj, bytes) } } @@ -1023,22 +1019,22 @@ impl ImmixHybridCopyContext { pub(crate) fn new( tls: VMWorkerThread, context: Arc>, - space: &'static ImmixSpace, + space: ArcFlexMut>, ) -> Self { ImmixHybridCopyContext { - copy_allocator: ImmixAllocator::new(tls.0, Some(space), context.clone(), false), - defrag_allocator: ImmixAllocator::new(tls.0, Some(space), context, true), + copy_allocator: ImmixAllocator::new(tls.0, space.clone(), context.clone(), false), + defrag_allocator: ImmixAllocator::new(tls.0, space, context, true), } } - fn get_space(&self) -> &ImmixSpace { + fn get_space(&self) -> &ArcFlexMut> { // Both copy allocators should point to the same space. 
debug_assert_eq!( - self.defrag_allocator.immix_space().common().descriptor, - self.copy_allocator.immix_space().common().descriptor + self.defrag_allocator.space.read().common().descriptor, + self.copy_allocator.space.read().common().descriptor ); // Just get the space from either allocator - self.defrag_allocator.immix_space() + &self.defrag_allocator.space } } diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index ec6b2f7506..32da9b53cf 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -123,6 +123,8 @@ impl Space for LargeObjectSpace { use crate::scheduler::GCWorker; use crate::util::copy::CopySemantics; +use super::space::SpaceAllocFail; + impl crate::policy::gc_work::PolicyTraceObject for LargeObjectSpace { fn trace_object( &self, @@ -242,7 +244,7 @@ impl LargeObjectSpace { } /// Allocate an object - pub fn allocate_pages(&self, tls: VMThread, pages: usize) -> Address { + pub fn allocate_pages(&self, tls: VMThread, pages: usize) -> Result { self.acquire(tls, pages) } diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index bd2cabad40..db6ef8d2b3 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -114,7 +114,7 @@ impl Space for LockFreeImmortalSpace { data_pages + meta_pages } - fn acquire(&self, _tls: VMThread, pages: usize) -> Address { + fn acquire(&self, _tls: VMThread, pages: usize) -> Result { let bytes = conversions::pages_to_bytes(pages); let start = self .cursor @@ -128,7 +128,7 @@ impl Space for LockFreeImmortalSpace { if self.slow_path_zeroing { crate::util::memory::zero(start, bytes); } - start + Ok(start) } /// Get the name of the space @@ -151,6 +151,8 @@ use crate::plan::{ObjectQueue, VectorObjectQueue}; use crate::scheduler::GCWorker; use crate::util::copy::CopySemantics; +use super::space::SpaceAllocFail; + impl crate::policy::gc_work::PolicyTraceObject for LockFreeImmortalSpace { fn trace_object( &self, diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index 7454fbb287..a261335842 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -6,6 +6,7 @@ use crate::plan::VectorObjectQueue; use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::space::CommonSpace; +use crate::policy::space::SpaceAllocFail; use crate::scheduler::GCWorkScheduler; use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::PageResource; @@ -20,7 +21,7 @@ use crate::util::Address; use crate::util::ObjectReference; use crate::util::{conversions, metadata}; use crate::vm::VMBinding; -use crate::vm::{ActivePlan, Collection, ObjectModel}; +use crate::vm::{ActivePlan, ObjectModel}; use crate::{policy::space::Space, util::heap::layout::vm_layout::BYTES_IN_CHUNK}; #[cfg(debug_assertions)] use std::collections::HashMap; @@ -327,12 +328,17 @@ impl MallocSpace { } } - pub fn alloc(&self, tls: VMThread, size: usize, align: usize, offset: usize) -> Address { + pub fn alloc( + &self, + tls: VMThread, + size: usize, + align: usize, + offset: usize, + ) -> Result { // TODO: Should refactor this and Space.acquire() if self.get_gc_trigger().poll(false, Some(self)) { assert!(VM::VMActivePlan::is_mutator(tls), "Polling in GC worker"); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); - return unsafe { Address::zero() }; + return Err(SpaceAllocFail); } let (address, is_offset_malloc) = alloc::(size, align, offset); 
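// A minimal sketch (not part of the diff above) of the caller-side contract these changes
// introduce: space-level allocation now returns Result<Address, SpaceAllocFail> and no longer
// calls block_for_gc itself, so the allocator that invoked it is expected to block for GC on
// failure and hand back a null address, as the allocator changes later in this patch do.
// The function name is hypothetical; the calls are the ones used elsewhere in the patch.
use crate::policy::marksweepspace::malloc_ms::MallocSpace;
use crate::util::{Address, VMMutatorThread, VMThread};
use crate::vm::{Collection, VMBinding};

fn alloc_or_block_for_gc<VM: VMBinding>(
    space: &MallocSpace<VM>,
    tls: VMThread,
    size: usize,
    align: usize,
    offset: usize,
) -> Address {
    match space.alloc(tls, size, align, offset) {
        Ok(addr) => addr,
        Err(_) => {
            // The space has already requested a GC; block this mutator and return a null
            // address so the allocation is retried after the collection.
            VM::VMCollection::block_for_gc(VMMutatorThread(tls));
            Address::ZERO
        }
    }
}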
@@ -363,7 +369,7 @@ impl MallocSpace { } } - address + Ok(address) } pub fn free(&self, addr: Address) { diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 8d8eae7d0e..7102d56a6b 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -335,10 +335,9 @@ impl MarkSweepSpace { } let acquired = self.acquire(tls, Block::BYTES >> LOG_BYTES_IN_PAGE); - if acquired.is_zero() { - BlockAcquireResult::Exhausted - } else { - BlockAcquireResult::Fresh(Block::from_unaligned_address(acquired)) + match acquired { + Err(_) => BlockAcquireResult::Exhausted, + Ok(addr) => BlockAcquireResult::Fresh(Block::from_unaligned_address(addr)), } } diff --git a/src/policy/space.rs b/src/policy/space.rs index 56ae00f8af..10c4e9fee6 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -37,7 +37,21 @@ use std::sync::Mutex; use downcast_rs::Downcast; -pub trait Space: 'static + SFT + Sync + Downcast { +/// This type represents a failure to allocate from a space, similar to [`crate::util::heap::pageresource::PRAllocFail`]. +pub struct SpaceAllocFail; + +/// This trait defines and manages spaces. A space is a region of virtual memory (contiguous or +/// discontiguous) which is subject to the same memory management +/// regime (also known as 'policy'). Multiple spaces (instances of this class or its +/// descendants) may have the same policy (e.g. there could be numerous +/// instances of CopySpace, each with different roles). Spaces are +/// defined in terms of a unique region of virtual memory, so no two +/// space instances ever share any virtual memory. +/// +/// In addition to tracking virtual memory use and the mapping to +/// policy, spaces also manage memory consumption (used virtual +/// memory). +pub trait Space: 'static + SFT + Sync + Send + Downcast { fn as_space(&self) -> &dyn Space; fn as_sft(&self) -> &(dyn SFT + Sync + 'static); fn get_page_resource(&self) -> &dyn PageResource; @@ -75,7 +89,23 @@ pub trait Space: 'static + SFT + Sync + Downcast { false } - fn acquire(&self, tls: VMThread, pages: usize) -> Address { + /// Acquire a number of pages from the page resource, returning + /// a result that is either the address of the first page, + /// or an error if the allocation fails. + /// + /// When an error is returned, a GC has been requested by this method, and the caller should + /// call [`crate::vm::Collection::block_for_gc`] and wait for the GC. + /// + /// First the page budget is checked to see whether polling the GC is + /// necessary. If so, the GC is polled. If a GC is required then the + /// request fails and an error is returned. + /// + /// If the check of the page budget does not lead to GC being + /// triggered, then a request is made for specific pages in virtual + /// memory. If the page manager cannot satisfy this request, then + /// the request fails, a GC is forced, and an error is returned. + /// Otherwise the address of the first page is returned.
+ fn acquire(&self, tls: VMThread, pages: usize) -> Result { trace!("Space.acquire, tls={:?}", tls); debug_assert!( @@ -111,8 +141,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { .policy .on_pending_allocation(pages_reserved); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator - unsafe { Address::zero() } + Err(SpaceAllocFail) } else { debug!("Collection not required"); @@ -204,7 +233,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { } debug!("Space.acquire(), returned = {}", res.start); - res.start + Ok(res.start) } Err(_) => { drop(lock); // drop the lock immediately @@ -224,8 +253,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { .policy .on_pending_allocation(pages_reserved); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator. - unsafe { Address::zero() } + Err(SpaceAllocFail) } } } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index 7aa1eb9903..bb8e1fdd19 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -2,6 +2,7 @@ use crate::mmtk::SFT_MAP; use crate::plan::{ObjectQueue, VectorObjectQueue}; use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; +use crate::policy::space::SpaceAllocFail; use crate::policy::space::{CommonSpace, Space}; use crate::util::address::Address; use crate::util::constants::BYTES_IN_PAGE; @@ -119,7 +120,7 @@ impl Space for VMSpace { unreachable!() } - fn acquire(&self, _tls: VMThread, _pages: usize) -> Address { + fn acquire(&self, _tls: VMThread, _pages: usize) -> Result { unreachable!() } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 88bba5c54c..d0ce789d24 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -55,11 +55,10 @@ impl Prepare { impl GCWork for Prepare { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("Prepare Global"); - // We assume this is the only running work packet that accesses plan at the point of execution - let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) }; - plan_mut.prepare(worker.tls); + let plan = mmtk.get_plan(); + plan.prepare(worker.tls); - if plan_mut.constraints().needs_prepare_mutator { + if plan.constraints().needs_prepare_mutator { for mutator in ::VMActivePlan::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] .add(PrepareMutator::::new(mutator)); @@ -128,10 +127,8 @@ impl GCWork for Release { trace!("Release Global"); mmtk.gc_trigger.policy.on_gc_release(mmtk); - // We assume this is the only running work packet that accesses plan at the point of execution - let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) }; - plan_mut.release(worker.tls); + mmtk.get_plan().release(worker.tls); for mutator in ::VMActivePlan::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::Release] @@ -244,9 +241,7 @@ impl GCWork for EndOfGC { ); } - // We assume this is the only running work packet that accesses plan at the point of execution - let plan_mut: &mut dyn Plan = unsafe { mmtk.get_plan_mut() }; - plan_mut.end_of_gc(worker.tls); + mmtk.get_plan().end_of_gc(worker.tls); #[cfg(feature = "extreme_assertions")] if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) { diff --git a/src/scheduler/work.rs b/src/scheduler/work.rs index 6018eed0a3..fe1366e020 100644 --- a/src/scheduler/work.rs +++ b/src/scheduler/work.rs @@ -15,7 +15,7 @@ pub trait GCWork: 'static + Send { /// If the feature "work_packet_stats" is not enabled, 
this call simply forwards the call /// to `do_work()`. fn do_work_with_stat(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - debug!("{}", std::any::type_name::()); + debug!("{} start", std::any::type_name::()); debug_assert!(!worker.tls.0.0.is_null(), "TLS must be set correctly for a GC worker before the worker does any work. GC Worker {} has no valid tls.", worker.ordinal); #[cfg(feature = "work_packet_stats")] @@ -34,6 +34,7 @@ pub trait GCWork: 'static + Send { let mut worker_stat = worker.shared.borrow_stat_mut(); stat.end_of_work(&mut worker_stat); } + debug!("{} end", std::any::type_name::()); } /// Get the compile-time static type name for the work packet. diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index eae4eefbcc..fc56cadd04 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -9,7 +9,6 @@ use crate::MMTK; use std::sync::atomic::Ordering; use std::sync::Arc; -use crate::policy::space::Space; use crate::util::constants::*; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; @@ -155,10 +154,7 @@ pub trait Allocator: Downcast { /// Return the [`VMThread`] associated with this allocator instance. fn get_tls(&self) -> VMThread; - /// Return the [`Space`](src/policy/space/Space) instance associated with this allocator instance. - fn get_space(&self) -> &'static dyn Space; - - /// Return the context for the allocator. + // Return the context for the allocator. fn get_context(&self) -> &AllocatorContext; /// Return if this allocator can do thread local allocation. If an allocator does not do thread diff --git a/src/util/alloc/allocators.rs b/src/util/alloc/allocators.rs index f35707e660..6f3780c9d8 100644 --- a/src/util/alloc/allocators.rs +++ b/src/util/alloc/allocators.rs @@ -3,13 +3,11 @@ use std::sync::Arc; use memoffset::offset_of; -use crate::policy::largeobjectspace::LargeObjectSpace; -use crate::policy::marksweepspace::malloc_ms::MallocSpace; -use crate::policy::marksweepspace::native_ms::MarkSweepSpace; use crate::policy::space::Space; use crate::util::alloc::LargeObjectAllocator; use crate::util::alloc::MallocAllocator; use crate::util::alloc::{Allocator, BumpAllocator, ImmixAllocator}; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::VMMutatorThread; use crate::vm::VMBinding; use crate::Mutator; @@ -102,7 +100,7 @@ impl Allocators { pub fn new( mutator_tls: VMMutatorThread, mmtk: &MMTK, - space_mapping: &[(AllocatorSelector, &'static dyn Space)], + space_mapping: &[(AllocatorSelector, ArcFlexMut>)], ) -> Self { let mut ret = Allocators { bump_pointer: unsafe { MaybeUninit::uninit().assume_init() }, @@ -114,46 +112,47 @@ impl Allocators { }; let context = Arc::new(AllocatorContext::new(mmtk)); - for &(selector, space) in space_mapping.iter() { + for (selector, space) in space_mapping.iter() { + let space = space.clone(); match selector { AllocatorSelector::BumpPointer(index) => { - ret.bump_pointer[index as usize].write(BumpAllocator::new( + ret.bump_pointer[*index as usize].write(BumpAllocator::new( mutator_tls.0, space, context.clone(), )); } AllocatorSelector::LargeObject(index) => { - ret.large_object[index as usize].write(LargeObjectAllocator::new( + ret.large_object[*index as usize].write(LargeObjectAllocator::new( mutator_tls.0, - space.downcast_ref::>().unwrap(), + space.downcast(), context.clone(), )); } AllocatorSelector::Malloc(index) => { - ret.malloc[index as usize].write(MallocAllocator::new( + ret.malloc[*index as usize].write(MallocAllocator::new( mutator_tls.0, - 
space.downcast_ref::>().unwrap(), + space.downcast(), context.clone(), )); } AllocatorSelector::Immix(index) => { - ret.immix[index as usize].write(ImmixAllocator::new( + ret.immix[*index as usize].write(ImmixAllocator::new( mutator_tls.0, - Some(space), + space.downcast(), context.clone(), false, )); } AllocatorSelector::FreeList(index) => { - ret.free_list[index as usize].write(FreeListAllocator::new( + ret.free_list[*index as usize].write(FreeListAllocator::new( mutator_tls.0, - space.downcast_ref::>().unwrap(), + space.downcast(), context.clone(), )); } AllocatorSelector::MarkCompact(index) => { - ret.markcompact[index as usize].write(MarkCompactAllocator::new( + ret.markcompact[*index as usize].write(MarkCompactAllocator::new( mutator_tls.0, space, context.clone(), diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 98c33fc56d..b8daf2ad67 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -7,6 +7,7 @@ use crate::util::alloc::Allocator; use crate::policy::space::Space; use crate::util::conversions::bytes_to_pages; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::vm::VMBinding; const BYTES_IN_PAGE: usize = 1 << 12; @@ -20,7 +21,7 @@ pub struct BumpAllocator { /// Bump-pointer itself. pub bump_pointer: BumpPointer, /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. - space: &'static dyn Space, + space: ArcFlexMut>, pub(in crate::util::alloc) context: Arc>, } @@ -65,7 +66,7 @@ impl BumpAllocator { self.bump_pointer.reset(zero, zero); } - pub fn rebind(&mut self, space: &'static dyn Space) { + pub fn rebind(&mut self, space: ArcFlexMut>) { self.reset(); self.space = space; } @@ -77,10 +78,6 @@ use crate::util::alloc::fill_alignment_gap; use super::allocator::AllocatorContext; impl Allocator for BumpAllocator { - fn get_space(&self) -> &'static dyn Space { - self.space - } - fn get_context(&self) -> &AllocatorContext { &self.context } @@ -171,7 +168,7 @@ impl Allocator for BumpAllocator { impl BumpAllocator { pub(crate) fn new( tls: VMThread, - space: &'static dyn Space, + space: ArcFlexMut>, context: Arc>, ) -> Self { BumpAllocator { @@ -189,34 +186,42 @@ impl BumpAllocator { offset: usize, stress_test: bool, ) -> Address { - if self.space.will_oom_on_acquire(self.tls, size) { + if self.space.read().will_oom_on_acquire(self.tls, size) { return Address::ZERO; } let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK); - let acquired_start = self.space.acquire(self.tls, bytes_to_pages(block_size)); - if acquired_start.is_zero() { - trace!("Failed to acquire a new block"); - acquired_start - } else { - trace!( - "Acquired a new block of size {} with start address {}", - block_size, - acquired_start - ); - if !stress_test { - self.set_limit(acquired_start, acquired_start + block_size); - self.alloc(size, align, offset) - } else { - // For a stress test, we artificially make the fastpath fail by - // manipulating the limit as below. - // The assumption here is that we use an address range such that - // cursor > block_size always. - self.set_limit(acquired_start, unsafe { Address::from_usize(block_size) }); - // Note that we have just acquired a new block so we know that we don't have to go - // through the entire allocation sequence again, we can directly call the slow path - // allocation. 
- self.alloc_slow_once_precise_stress(size, align, offset, false) + let acquire_res = self + .space + .read() + .acquire(self.tls, bytes_to_pages(block_size)); + match acquire_res { + Ok(acquired_start) => { + trace!( + "Acquired a new block of size {} with start address {}", + block_size, + acquired_start + ); + if !stress_test { + self.set_limit(acquired_start, acquired_start + block_size); + self.alloc(size, align, offset) + } else { + // For a stress test, we artificially make the fastpath fail by + // manipulating the limit as below. + // The assumption here is that we use an address range such that + // cursor > block_size always. + self.set_limit(acquired_start, unsafe { Address::from_usize(block_size) }); + // Note that we have just acquired a new block so we know that we don't have to go + // through the entire allocation sequence again, we can directly call the slow path + // allocation. + self.alloc_slow_once_precise_stress(size, align, offset, false) + } + } + Err(_) => { + use crate::vm::Collection; + trace!("Failed to acquire a new block"); + VM::VMCollection::block_for_gc(VMMutatorThread(self.tls)); + Address::ZERO } } } diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 6d302af98d..d96c1f2c57 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -6,7 +6,9 @@ use crate::policy::marksweepspace::native_ms::*; use crate::util::alloc::allocator; use crate::util::alloc::Allocator; use crate::util::linear_scan::Region; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::Address; +use crate::util::VMMutatorThread; use crate::util::VMThread; use crate::vm::VMBinding; @@ -16,7 +18,7 @@ use super::allocator::AllocatorContext; #[repr(C)] pub struct FreeListAllocator { pub tls: VMThread, - space: &'static MarkSweepSpace, + space: ArcFlexMut>, context: Arc>, /// blocks with free space pub available_blocks: BlockLists, @@ -37,10 +39,6 @@ impl Allocator for FreeListAllocator { self.tls } - fn get_space(&self) -> &'static dyn crate::policy::space::Space { - self.space - } - fn get_context(&self) -> &AllocatorContext { &self.context } @@ -130,7 +128,7 @@ impl FreeListAllocator { // New free list allcoator pub(crate) fn new( tls: VMThread, - space: &'static MarkSweepSpace, + space: ArcFlexMut>, context: Arc>, ) -> Self { FreeListAllocator { @@ -294,9 +292,12 @@ impl FreeListAllocator { ) -> Option { let bin = mi_bin::(size, align); loop { - match self.space.acquire_block(self.tls, size, align) { + let block = self.space.read().acquire_block(self.tls, size, align); + match block { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => { // GC + use crate::vm::Collection; + VM::VMCollection::block_for_gc(VMMutatorThread(self.tls)); return None; } @@ -332,7 +333,7 @@ impl FreeListAllocator { } fn init_block(&self, block: Block, cell_size: usize) { - self.space.record_new_block(block); + self.space.read().record_new_block(block); // construct free list let block_end = block.start() + Block::BYTES; @@ -458,12 +459,13 @@ impl FreeListAllocator { debug!("reset"); // sweep all blocks and push consumed onto available list for bin in 0..MI_BIN_FULL { + let space = self.space.read(); // Sweep available blocks - self.available_blocks[bin].sweep_blocks(self.space); - self.available_blocks_stress[bin].sweep_blocks(self.space); + self.available_blocks[bin].sweep_blocks(&space); + self.available_blocks_stress[bin].sweep_blocks(&space); // Sweep consumed blocks, and also push the 
blocks back to the available list. - self.consumed_blocks[bin].sweep_blocks(self.space); + self.consumed_blocks[bin].sweep_blocks(&space); if *self.context.options.precise_stress && self.context.options.is_stress_test_gc_enabled() { @@ -483,7 +485,8 @@ impl FreeListAllocator { } fn abandon_blocks(&mut self) { - let mut abandoned = self.space.abandoned.lock().unwrap(); + let space = self.space.read(); + let mut abandoned = space.abandoned.lock().unwrap(); for i in 0..MI_BIN_FULL { let available = self.available_blocks.get_mut(i).unwrap(); if !available.is_empty() { diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 077aaefbd9..0e196c5610 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -4,11 +4,11 @@ use super::allocator::{align_allocation_no_fill, fill_alignment_gap, AllocatorCo use super::BumpPointer; use crate::policy::immix::line::*; use crate::policy::immix::ImmixSpace; -use crate::policy::space::Space; use crate::util::alloc::allocator::get_maximum_aligned_size; use crate::util::alloc::Allocator; use crate::util::linear_scan::Region; use crate::util::opaque_pointer::VMThread; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::rust_util::unlikely; use crate::util::Address; use crate::vm::*; @@ -19,7 +19,7 @@ pub struct ImmixAllocator { pub tls: VMThread, pub bump_pointer: BumpPointer, /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. - space: &'static ImmixSpace, + pub(crate) space: ArcFlexMut>, context: Arc>, /// *unused* hot: bool, @@ -43,10 +43,6 @@ impl ImmixAllocator { } impl Allocator for ImmixAllocator { - fn get_space(&self) -> &'static dyn Space { - self.space as _ - } - fn get_context(&self) -> &AllocatorContext { &self.context } @@ -166,13 +162,13 @@ impl Allocator for ImmixAllocator { impl ImmixAllocator { pub(crate) fn new( tls: VMThread, - space: Option<&'static dyn Space>, + space: ArcFlexMut>, context: Arc>, copy: bool, ) -> Self { ImmixAllocator { tls, - space: space.unwrap().downcast_ref::>().unwrap(), + space, context, bump_pointer: BumpPointer::default(), hot: false, @@ -183,10 +179,6 @@ impl ImmixAllocator { } } - pub fn immix_space(&self) -> &'static ImmixSpace { - self.space - } - /// Large-object (larger than a line) bump allocation. fn overflow_alloc(&mut self, size: usize, align: usize, offset: usize) -> Address { trace!("{:?}: overflow_alloc", self.tls); @@ -234,8 +226,7 @@ impl ImmixAllocator { fn acquire_recyclable_lines(&mut self, size: usize, align: usize, offset: usize) -> bool { while self.line.is_some() || self.acquire_recyclable_block() { let line = self.line.unwrap(); - if let Some((start_line, end_line)) = self.immix_space().get_next_available_lines(line) - { + if let Some((start_line, end_line)) = self.space.read().get_next_available_lines(line) { // Find recyclable lines. Update the bump allocation cursor and limit. self.bump_pointer.cursor = start_line.start(); self.bump_pointer.limit = end_line.start(); @@ -274,7 +265,7 @@ impl ImmixAllocator { /// Get a recyclable block from ImmixSpace. fn acquire_recyclable_block(&mut self) -> bool { - match self.immix_space().get_reusable_block(self.copy) { + match self.space.read().get_reusable_block(self.copy) { Some(block) => { trace!("{:?}: acquire_recyclable_block -> {:?}", self.tls, block); // Set the hole-searching cursor to the start of this block. @@ -287,9 +278,9 @@ impl ImmixAllocator { // Get a clean block from ImmixSpace. 
fn acquire_clean_block(&mut self, size: usize, align: usize, offset: usize) -> Address { - match self.immix_space().get_clean_block(self.tls, self.copy) { - None => Address::ZERO, - Some(block) => { + let alloc_res = self.space.read().get_clean_block(self.tls, self.copy); + match alloc_res { + Ok(block) => { trace!( "{:?}: Acquired a new block {:?} -> {:?}", self.tls, @@ -305,6 +296,10 @@ impl ImmixAllocator { } self.alloc(size, align, offset) } + Err(_) => { + VM::VMCollection::block_for_gc(crate::util::VMMutatorThread(self.tls)); + Address::ZERO + } } } diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index 9a6bf6cb30..0082be2537 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -4,6 +4,7 @@ use crate::policy::largeobjectspace::LargeObjectSpace; use crate::policy::space::Space; use crate::util::alloc::{allocator, Allocator}; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::Address; use crate::vm::VMBinding; @@ -14,7 +15,7 @@ pub struct LargeObjectAllocator { /// [`VMThread`] associated with this allocator instance pub tls: VMThread, /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. - space: &'static LargeObjectSpace, + space: ArcFlexMut>, context: Arc>, } @@ -27,11 +28,6 @@ impl Allocator for LargeObjectAllocator { &self.context } - fn get_space(&self) -> &'static dyn Space { - // Casting the interior of the Option: from &LargeObjectSpace to &dyn Space - self.space as &'static dyn Space - } - fn does_thread_local_allocation(&self) -> bool { false } @@ -47,20 +43,28 @@ impl Allocator for LargeObjectAllocator { } fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address { - if self.space.will_oom_on_acquire(self.tls, size) { + if self.space.read().will_oom_on_acquire(self.tls, size) { return Address::ZERO; } let maxbytes = allocator::get_maximum_aligned_size::(size, align); let pages = crate::util::conversions::bytes_to_pages_up(maxbytes); - self.space.allocate_pages(self.tls, pages) + let alloc_res = self.space.read().allocate_pages(self.tls, pages); + match alloc_res { + Ok(addr) => addr, + Err(_) => { + use crate::vm::Collection; + VM::VMCollection::block_for_gc(VMMutatorThread(self.tls)); + Address::ZERO + } + } } } impl LargeObjectAllocator { pub(crate) fn new( tls: VMThread, - space: &'static LargeObjectSpace, + space: ArcFlexMut>, context: Arc>, ) -> Self { LargeObjectAllocator { diff --git a/src/util/alloc/malloc_allocator.rs b/src/util/alloc/malloc_allocator.rs index f88ec592f7..8d78c451ac 100644 --- a/src/util/alloc/malloc_allocator.rs +++ b/src/util/alloc/malloc_allocator.rs @@ -1,9 +1,9 @@ use std::sync::Arc; use crate::policy::marksweepspace::malloc_ms::MallocSpace; -use crate::policy::space::Space; use crate::util::alloc::Allocator; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::Address; use crate::vm::VMBinding; @@ -14,15 +14,11 @@ pub struct MallocAllocator { /// [`VMThread`] associated with this allocator instance pub tls: VMThread, /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. 
- space: &'static MallocSpace, + space: ArcFlexMut>, context: Arc>, } impl Allocator for MallocAllocator { - fn get_space(&self) -> &'static dyn Space { - self.space as &'static dyn Space - } - fn get_context(&self) -> &AllocatorContext { &self.context } @@ -40,14 +36,21 @@ impl Allocator for MallocAllocator { } fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address { - self.space.alloc(self.tls, size, align, offset) + self.space + .read() + .alloc(self.tls, size, align, offset) + .unwrap_or_else(|_| { + use crate::vm::Collection; + VM::VMCollection::block_for_gc(VMMutatorThread(self.tls)); + Address::ZERO + }) } } impl MallocAllocator { pub(crate) fn new( tls: VMThread, - space: &'static MallocSpace, + space: ArcFlexMut>, context: Arc>, ) -> Self { MallocAllocator { diff --git a/src/util/alloc/markcompact_allocator.rs b/src/util/alloc/markcompact_allocator.rs index a26d7dda26..54f6452a91 100644 --- a/src/util/alloc/markcompact_allocator.rs +++ b/src/util/alloc/markcompact_allocator.rs @@ -5,6 +5,7 @@ use super::BumpAllocator; use crate::policy::space::Space; use crate::util::alloc::Allocator; use crate::util::opaque_pointer::*; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::Address; use crate::vm::VMBinding; @@ -24,16 +25,12 @@ impl MarkCompactAllocator { self.bump_allocator.reset(); } - pub fn rebind(&mut self, space: &'static dyn Space) { + pub fn rebind(&mut self, space: ArcFlexMut>) { self.bump_allocator.rebind(space); } } impl Allocator for MarkCompactAllocator { - fn get_space(&self) -> &'static dyn Space { - self.bump_allocator.get_space() - } - fn get_context(&self) -> &AllocatorContext { &self.bump_allocator.context } @@ -93,7 +90,7 @@ impl MarkCompactAllocator { crate::policy::markcompactspace::MarkCompactSpace::::HEADER_RESERVED_IN_BYTES; pub(crate) fn new( tls: VMThread, - space: &'static dyn Space, + space: ArcFlexMut>, context: Arc>, ) -> Self { MarkCompactAllocator { diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs index e7e1226a41..c5360168a0 100644 --- a/src/util/copy/mod.rs +++ b/src/util/copy/mod.rs @@ -3,13 +3,12 @@ use std::sync::Arc; use crate::plan::PlanConstraints; use crate::policy::copy_context::PolicyCopyContext; -use crate::policy::copyspace::CopySpace; use crate::policy::copyspace::CopySpaceCopyContext; -use crate::policy::immix::ImmixSpace; use crate::policy::immix::{ImmixCopyContext, ImmixHybridCopyContext}; use crate::policy::space::Space; use crate::util::object_forwarding; use crate::util::opaque_pointer::VMWorkerThread; +use crate::util::rust_util::flex_mut::ArcFlexMut; use crate::util::{Address, ObjectReference}; use crate::vm::ObjectModel; use crate::vm::VMBinding; @@ -25,7 +24,7 @@ const MAX_COPYSPACE_COPY_ALLOCATORS: usize = 1; const MAX_IMMIX_COPY_ALLOCATORS: usize = 1; const MAX_IMMIX_HYBRID_COPY_ALLOCATORS: usize = 1; -type CopySpaceMapping = Vec<(CopySelector, &'static dyn Space)>; +type CopySpaceMapping = Vec<(CopySelector, ArcFlexMut>)>; /// A configuration for GCWorkerCopyContext. /// Similar to a `MutatorConfig`, @@ -188,27 +187,28 @@ impl GCWorkerCopyContext { let context = Arc::new(AllocatorContext::new(mmtk)); // Initiate the copy context for each policy based on the space mapping. 
- for &(selector, space) in ret.config.space_mapping.iter() { + for (selector, space) in ret.config.space_mapping.iter() { + let space = space.clone(); match selector { CopySelector::CopySpace(index) => { - ret.copy[index as usize].write(CopySpaceCopyContext::new( + ret.copy[*index as usize].write(CopySpaceCopyContext::new( worker_tls, context.clone(), - space.downcast_ref::>().unwrap(), + space.downcast(), )); } CopySelector::Immix(index) => { - ret.immix[index as usize].write(ImmixCopyContext::new( + ret.immix[*index as usize].write(ImmixCopyContext::new( worker_tls, context.clone(), - space.downcast_ref::>().unwrap(), + space.downcast(), )); } CopySelector::ImmixHybrid(index) => { - ret.immix_hybrid[index as usize].write(ImmixHybridCopyContext::new( + ret.immix_hybrid[*index as usize].write(ImmixHybridCopyContext::new( worker_tls, context.clone(), - space.downcast_ref::>().unwrap(), + space.downcast(), )); } CopySelector::Unused => unreachable!(), diff --git a/src/util/rust_util/flex_mut.rs b/src/util/rust_util/flex_mut.rs new file mode 100644 index 0000000000..128a2fc6ed --- /dev/null +++ b/src/util/rust_util/flex_mut.rs @@ -0,0 +1,284 @@ +use crate::policy::space::Space; +use crate::vm::VMBinding; + +use downcast_rs::Downcast; +use std::{ops::Deref, ops::DerefMut, sync::Arc}; + +/// `ArcFlexMut` is a replacement for `UnsafeCell` for a shared reference in situations where 1. +/// its mutability is managed by the programmer, 2. mutability is hard to reason about statically, +/// 3. using locks or `RefCell`/`AtomicRefCell` is not practical for performance reasons, and +/// 4. the shared reference could be both statically typed and a dyn ref to a trait object, depending +/// on where it is used. +/// `ArcFlexMut` does not guarantee thread safety, and it does not provide any actual locking. +/// It provides methods for acquiring a read or write guard, and can optionally check if there is +/// any possible data race. Without the checks, in a release build, `ArcFlexMut` should perform +/// as efficiently as `UnsafeCell`. +/// We currently use this type for [`crate::policy::space::Space`]s. +#[repr(transparent)] +pub struct ArcFlexMut +where + T: ?Sized, +{ + inner: Arc>, +} + +impl ArcFlexMut { + /// Create a shared reference to the object. + pub fn new(v: T) -> Self { + Self { + inner: Arc::new(peace_lock::RwLock::new(v)), + } + } +} + +impl ArcFlexMut { + /// Acquire a read guard to get immutable access to the data. A reader is allowed as long as there is no writer. + /// If the feature `check_flex_mut` is enabled, the method will panic if the rule is violated. + pub fn read(&self) -> ArcFlexMutReadGuard<'_, T> { + ArcFlexMutReadGuard { + inner: self.inner.read(), + } + } + + /// Acquire a write guard to get mutable access to the data. A writer is allowed only when there is no other writer or reader. + /// If the feature `check_flex_mut` is enabled, the method will panic if the rule is violated. + pub fn write(&self) -> ArcFlexMutWriteGuard<'_, T> { + ArcFlexMutWriteGuard { + inner: self.inner.write(), + } + } +} + +impl Clone for ArcFlexMut +where + T: ?Sized, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +// For types that implement `Downcast`, we can turn the shared reference into a reference of a concrete type. + +impl ArcFlexMut { + /// Is it allowed to downcast to the given type?
+ fn can_downcast(&self) -> bool { + let lock = self.inner.read(); + (*lock).as_any().downcast_ref::().is_some() + } + + /// Downcast the shared reference into a shared reference of a concrete type. The new reference share + /// the count and the lock with the old consumed reference. + pub fn downcast(self) -> ArcFlexMut { + if self.can_downcast::() { + let raw = Arc::into_raw(self.inner); + let new_inner = unsafe { Arc::from_raw(raw as *const peace_lock::RwLock) }; + ArcFlexMut { inner: new_inner } + } else { + panic!("Failed to downcast") + } + } +} + +// Methods to turn the shared reference into a shared reference of a trait object. +// The references points to the same object with the same count. +// This impl block is a workaround to implement the functionality specifically for +// `dyn Space`, as I can't find a way to implement this using generics. + +macro_rules! to_trait_object { + ($self: expr, $trait: ty) => {{ + let inner = $self.inner; + let raw = Arc::into_raw(inner); + let new_inner = unsafe { Arc::from_raw(raw as *const peace_lock::RwLock<$trait>) }; + ArcFlexMut { inner: new_inner } + }}; +} + +impl ArcFlexMut { + pub fn into_dyn_space(self) -> ArcFlexMut> + where + T: 'static + Space, + { + to_trait_object!(self, dyn Space) + } +} + +/// Read guard for ArcFlexMut +pub struct ArcFlexMutReadGuard<'a, T> +where + T: ?Sized, +{ + inner: peace_lock::RwLockReadGuard<'a, T>, +} + +impl Deref for ArcFlexMutReadGuard<'_, T> +where + T: ?Sized, +{ + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.inner.deref() + } +} + +/// Write guard for ArcFlexMut +pub struct ArcFlexMutWriteGuard<'a, T> +where + T: ?Sized, +{ + inner: peace_lock::RwLockWriteGuard<'a, T>, +} + +impl Deref for ArcFlexMutWriteGuard<'_, T> +where + T: ?Sized, +{ + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.inner.deref() + } +} + +impl DerefMut for ArcFlexMutWriteGuard<'_, T> +where + T: ?Sized, +{ + #[inline] + fn deref_mut(&mut self) -> &mut T { + self.inner.deref_mut() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + struct Foo(usize); + trait Bar: 'static + Downcast { + fn get(&self) -> usize; + fn set(&mut self, v: usize); + } + impl Bar for Foo { + fn get(&self) -> usize { + self.0 + } + fn set(&mut self, v: usize) { + self.0 = v; + } + } + + impl ArcFlexMut { + fn into_dyn_bar(self) -> ArcFlexMut + where + T: 'static + Bar, + { + to_trait_object!(self, dyn Bar) + } + } + + #[allow(clippy::redundant_clone)] // Allow redundant clone for testing the count + #[test] + fn create_clone_drop() { + let r = ArcFlexMut::new(Foo(42)); + assert_eq!(Arc::strong_count(&r.inner), 1); + + { + let r2 = r.clone(); + assert_eq!(r2.inner.read().get(), 42); + assert_eq!(Arc::strong_count(&r2.inner), 2); + } + assert_eq!(Arc::strong_count(&r.inner), 1); + } + + #[test] + fn to_trait_object() { + let r: ArcFlexMut = ArcFlexMut::new(Foo(42)); + assert_eq!(Arc::strong_count(&r.inner), 1); + + let trait_obj: ArcFlexMut = r.clone().into_dyn_bar(); + assert_eq!(Arc::strong_count(&r.inner), 2); + assert_eq!(r.inner.read().get(), 42); + assert_eq!(Arc::strong_count(&trait_obj.inner), 2); + assert_eq!(trait_obj.inner.read().get(), 42); + + drop(trait_obj); + assert_eq!(Arc::strong_count(&r.inner), 1); + } + + #[test] + fn downcast() { + let r = ArcFlexMut::new(Foo(42)); + let trait_obj: ArcFlexMut = r.into_dyn_bar(); + assert_eq!(Arc::strong_count(&trait_obj.inner), 1); + + let trait_obj_clone = trait_obj.clone(); + assert_eq!(Arc::strong_count(&trait_obj.inner), 2); + + let downcast: ArcFlexMut = 
+            trait_obj_clone.downcast::<Foo>();
+        assert_eq!(Arc::strong_count(&trait_obj.inner), 2);
+        assert_eq!(Arc::strong_count(&downcast.inner), 2);
+        assert_eq!(downcast.inner.read().get(), 42);
+    }
+
+    #[test]
+    fn read() {
+        let r = ArcFlexMut::new(Foo(42));
+        assert_eq!(r.read().get(), 42);
+
+        let read1 = r.read();
+        let read2 = r.read();
+        assert_eq!(read1.get(), 42);
+        assert_eq!(read2.get(), 42);
+    }
+
+    #[allow(clippy::redundant_clone)] // Allow redundant clone for testing the count
+    #[test]
+    fn write() {
+        let r = ArcFlexMut::new(Foo(42));
+        let r2 = r.clone();
+        let trait_obj = r.clone().into_dyn_bar();
+        let downcast = trait_obj.clone().downcast::<Foo>();
+        assert_eq!(Arc::strong_count(&r.inner), 4);
+
+        r.write().set(1);
+        assert_eq!(r.read().get(), 1);
+        assert_eq!(r2.read().get(), 1);
+        assert_eq!(trait_obj.read().get(), 1);
+        assert_eq!(downcast.read().get(), 1);
+    }
+
+    #[test]
+    fn multiple_readers() {
+        let r = ArcFlexMut::new(Foo(42));
+        let read1 = r.read();
+        let read2 = r.read();
+        assert_eq!(read1.get(), 42);
+        assert_eq!(read2.get(), 42);
+    }
+
+    #[test]
+    #[cfg_attr(debug_assertions, should_panic)]
+    fn multiple_writers() {
+        let r = ArcFlexMut::new(Foo(42));
+        let write1 = r.write();
+        let write2 = r.write();
+        assert_eq!(write1.get(), 42);
+        assert_eq!(write2.get(), 42);
+    }
+
+    #[test]
+    #[cfg_attr(debug_assertions, should_panic)]
+    fn mix_reader_writer() {
+        let r = ArcFlexMut::new(Foo(42));
+        let read = r.read();
+        let write = r.write();
+        assert_eq!(read.get(), 42);
+        assert_eq!(write.get(), 42);
+    }
+}
diff --git a/src/util/rust_util/mod.rs b/src/util/rust_util/mod.rs
index e3596ff4a1..3bef788e1b 100644
--- a/src/util/rust_util/mod.rs
+++ b/src/util/rust_util/mod.rs
@@ -2,6 +2,7 @@
 //! functionalities that we may expect the Rust programming language and its standard libraries
 //! to provide.
 
+pub mod flex_mut;
 pub mod rev_group;
 pub mod zeroed_alloc;
 
diff --git a/vmbindings/dummyvm/Cargo.toml b/vmbindings/dummyvm/Cargo.toml
index a1a3af254b..be5251e71f 100644
--- a/vmbindings/dummyvm/Cargo.toml
+++ b/vmbindings/dummyvm/Cargo.toml
@@ -40,3 +40,4 @@ extreme_assertions = ["mmtk/extreme_assertions"]
 # Feature to control which benchmarks to run. See benches/main.rs
 bench_sft = []
 bench_alloc = []
+bench_post_alloc = []
diff --git a/vmbindings/dummyvm/benches/bench_post_alloc.rs b/vmbindings/dummyvm/benches/bench_post_alloc.rs
new file mode 100644
index 0000000000..d77ac763e2
--- /dev/null
+++ b/vmbindings/dummyvm/benches/bench_post_alloc.rs
@@ -0,0 +1,20 @@
+use criterion::{criterion_group, Criterion};
+
+use mmtk::plan::AllocationSemantics;
+use mmtk::vm::ObjectModel;
+use mmtk_dummyvm::api;
+use mmtk_dummyvm::test_fixtures::MutatorFixture;
+
+fn post_alloc(c: &mut Criterion) {
+    // 1GB so we are unlikely to OOM
+    let fixture = MutatorFixture::create_with_heapsize(1 << 30);
+    let addr = api::mmtk_alloc(fixture.mutator, 16, 4, 0, AllocationSemantics::Default);
+    let obj = mmtk_dummyvm::object_model::VMObjectModel::address_to_ref(addr);
+    c.bench_function("post_alloc", |b| {
+        b.iter(|| {
+            api::mmtk_post_alloc(fixture.mutator, obj, 8, AllocationSemantics::Default);
+        })
+    });
+}
+
+criterion_group!(benches, post_alloc);
diff --git a/vmbindings/dummyvm/benches/main.rs b/vmbindings/dummyvm/benches/main.rs
index 804379ee1b..49864ee873 100644
--- a/vmbindings/dummyvm/benches/main.rs
+++ b/vmbindings/dummyvm/benches/main.rs
@@ -13,3 +13,8 @@ criterion_main!(bench_sft::benches);
 mod bench_alloc;
 #[cfg(feature = "bench_alloc")]
 criterion_main!(bench_alloc::benches);
+
+#[cfg(feature = "bench_post_alloc")]
+mod bench_post_alloc;
+#[cfg(feature = "bench_post_alloc")]
+criterion_main!(bench_post_alloc::benches);
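Note (illustrative sketch, not part of the patch): the `to_trait_object!` macro and `downcast` above rely on converting an `Arc` of one pointee type into an `Arc` of another over the same allocation by round-tripping through raw pointers, which preserves the strong count. The standalone Rust example below isolates that mechanism; the `Shape`/`Circle` names and `into_dyn_shape` helper are invented for illustration and do not appear in MMTk.

use std::sync::Arc;

trait Shape {
    fn area(&self) -> f64;
}

struct Circle {
    radius: f64,
}

impl Shape for Circle {
    fn area(&self) -> f64 {
        std::f64::consts::PI * self.radius * self.radius
    }
}

// Turn Arc<Circle> into Arc<dyn Shape> without touching the reference count:
// Arc::into_raw hands back the data pointer, the `as` cast unsizes it into a
// fat pointer, and Arc::from_raw rebuilds an Arc over the same allocation.
fn into_dyn_shape(arc: Arc<Circle>) -> Arc<dyn Shape> {
    let raw = Arc::into_raw(arc);
    unsafe { Arc::from_raw(raw as *const dyn Shape) }
}

fn main() {
    let concrete = Arc::new(Circle { radius: 1.0 });
    let dyn_shape: Arc<dyn Shape> = into_dyn_shape(Arc::clone(&concrete));
    // Both handles share one allocation, so the strong count is 2.
    assert_eq!(Arc::strong_count(&concrete), 2);
    assert!((dyn_shape.area() - std::f64::consts::PI).abs() < 1e-9);
}

The patch applies the same idea with `peace_lock::RwLock<T>` as the pointee, which is how `into_dyn_space` (and the test-only `into_dyn_bar`) share one lock and one count across the concrete and trait-object views.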