diff --git a/Cargo.toml b/Cargo.toml
index 4abf95ebcf..04454fa85a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,7 +10,7 @@ repository = "https://github.com/mmtk/mmtk-core"
 readme = "README.md"
 categories = ["memory-management"]
 keywords = ["gc", "garbage", "collection", "garbage-collection", "allocation"]
-rust-version = "1.66.0"
+rust-version = "1.70.0"
 build = "build.rs"
 
 [lib]
@@ -42,6 +42,7 @@ num_cpus = "1.8"
 num-traits = "0.2"
 pfm = { version = "0.1.0-beta.3", optional = true }
 probe = "0.5"
+portable-atomic = "1.4.3"
 regex = "1.7.0"
 spin = "0.9.5"
 static_assertions = "1.1.0"
diff --git a/src/mmtk.rs b/src/mmtk.rs
index b7fef582d0..bf3d87c732 100644
--- a/src/mmtk.rs
+++ b/src/mmtk.rs
@@ -108,6 +108,7 @@ impl<VM: VMBinding> MMTK<VM> {
     pub fn new(options: Arc<Options>) -> Self {
         // Initialize SFT first in case we need to use this in the constructor.
         // The first call will initialize SFT map. Other calls will be blocked until SFT map is initialized.
+        crate::policy::sft_map::SFTRefStorage::pre_use_check();
         SFT_MAP.initialize_once(&create_sft_map);
 
         let num_workers = if cfg!(feature = "single_worker") {
diff --git a/src/plan/global.rs b/src/plan/global.rs
index c230c5891b..df887488dd 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -127,7 +127,12 @@ pub fn create_plan<VM: VMBinding>(
     }
 
     // Each space now has a fixed address for its lifetime. It is safe now to initialize SFT.
-    plan.for_each_space(&mut |s| s.initialize_sft());
+    let sft_map: &mut dyn crate::policy::sft_map::SFTMap =
+        unsafe { crate::mmtk::SFT_MAP.get_mut() }.as_mut();
+    plan.for_each_space(&mut |s| {
+        sft_map.notify_space_creation(s.as_sft());
+        s.initialize_sft(sft_map);
+    });
 
     plan
 }
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 5e1ff0cbc2..4d45beb4c0 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -105,8 +105,8 @@ impl<VM: VMBinding> Space<VM> for CopySpace<VM> {
         &self.common
     }
 
-    fn initialize_sft(&self) {
-        self.common().initialize_sft(self.as_sft())
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
+        self.common().initialize_sft(self.as_sft(), sft_map)
     }
 
     fn release_multiple_pages(&mut self, _start: Address) {
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index 2cfc4256c9..01354d920d 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -4,6 +4,7 @@ use crate::plan::VectorObjectQueue;
 use crate::policy::gc_work::TraceKind;
 use crate::policy::sft::GCWorkerMutRef;
 use crate::policy::sft::SFT;
+use crate::policy::sft_map::SFTMap;
 use crate::policy::space::{CommonSpace, Space};
 use crate::util::constants::LOG_BYTES_IN_PAGE;
 use crate::util::copy::*;
@@ -172,8 +173,8 @@ impl<VM: VMBinding> Space<VM> for ImmixSpace<VM> {
     fn common(&self) -> &CommonSpace<VM> {
         &self.common
     }
-    fn initialize_sft(&self) {
-        self.common().initialize_sft(self.as_sft())
+    fn initialize_sft(&self, sft_map: &mut dyn SFTMap) {
+        self.common().initialize_sft(self.as_sft(), sft_map)
     }
     fn release_multiple_pages(&mut self, _start: Address) {
         panic!("immixspace only releases pages enmasse")
diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs
index a131750366..1162229d56 100644
--- a/src/policy/immortalspace.rs
+++ b/src/policy/immortalspace.rs
@@ -91,8 +91,8 @@ impl<VM: VMBinding> Space<VM> for ImmortalSpace<VM> {
         &self.common
     }
 
-    fn initialize_sft(&self) {
-        self.common().initialize_sft(self.as_sft())
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
+        self.common().initialize_sft(self.as_sft(), sft_map)
     }
 
     fn release_multiple_pages(&mut self, _start: Address) {
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index 745f6b3db3..ec6b2f7506 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -107,8 +107,8 @@ impl<VM: VMBinding> Space<VM> for LargeObjectSpace<VM> {
         &self.pr
     }
 
-    fn initialize_sft(&self) {
-        self.common().initialize_sft(self.as_sft())
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
+        self.common().initialize_sft(self.as_sft(), sft_map)
     }
 
     fn common(&self) -> &CommonSpace<VM> {
diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs
index ba1dc6fff4..bd2cabad40 100644
--- a/src/policy/lockfreeimmortalspace.rs
+++ b/src/policy/lockfreeimmortalspace.rs
@@ -3,7 +3,6 @@ use atomic::Atomic;
 use std::marker::PhantomData;
 use std::sync::atomic::Ordering;
 
-use crate::mmtk::SFT_MAP;
 use crate::policy::sft::GCWorkerMutRef;
 use crate::policy::sft::SFT;
 use crate::policy::space::{CommonSpace, Space};
@@ -104,8 +103,8 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
         panic!("immortalspace only releases pages enmasse")
     }
 
-    fn initialize_sft(&self) {
-        unsafe { SFT_MAP.update(self.as_sft(), self.start, self.extent) };
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
+        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.extent) };
     }
 
     fn reserved_pages(&self) -> usize {
diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs
index ecc7b2035d..d5b0d7facd 100644
--- a/src/policy/markcompactspace.rs
+++ b/src/policy/markcompactspace.rs
@@ -111,8 +111,8 @@ impl<VM: VMBinding> Space<VM> for MarkCompactSpace<VM> {
         &self.common
     }
 
-    fn initialize_sft(&self) {
-        self.common().initialize_sft(self.as_sft())
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
+        self.common().initialize_sft(self.as_sft(), sft_map)
     }
 
     fn release_multiple_pages(&mut self, _start: Address) {
diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index 9b9a3eeb74..7454fbb287 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -145,7 +145,7 @@ impl<VM: VMBinding> Space<VM> for MallocSpace<VM> {
         self.gc_trigger.as_ref()
     }
 
-    fn initialize_sft(&self) {
+    fn initialize_sft(&self, _sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
         // Do nothing - we will set sft when we get new results from malloc
     }
 
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index d72cc5a131..8d8eae7d0e 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -156,8 +156,8 @@ impl<VM: VMBinding> Space<VM> for MarkSweepSpace<VM> {
         &self.pr
     }
 
-    fn initialize_sft(&self) {
-        self.common().initialize_sft(self.as_sft())
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
+        self.common().initialize_sft(self.as_sft(), sft_map)
    }
 
     fn common(&self) -> &CommonSpace<VM> {
diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs
index 8df090a8e2..37225b1384 100644
--- a/src/policy/sft_map.rs
+++ b/src/policy/sft_map.rs
@@ -2,6 +2,8 @@ use super::sft::*;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
 use crate::util::Address;
 
+use std::sync::atomic::Ordering;
+
 /// SFTMap manages the SFT table, and mapping between addresses with indices in the table. The trait allows
 /// us to have multiple implementations of the SFT table.
 pub trait SFTMap {
@@ -34,7 +36,10 @@
     /// # Safety
     /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range.
     /// Otherwise, the caller should check with `has_sft_entry()` before calling this method.
-    unsafe fn update(&self, space: *const (dyn SFT + Sync + 'static), start: Address, bytes: usize);
+    unsafe fn update(&self, space: SFTRawPointer, start: Address, bytes: usize);
+
+    /// Notify the SFT map for space creation. `DenseChunkMap` needs to create an entry for the space.
+    fn notify_space_creation(&mut self, _space: SFTRawPointer) {}
 
     /// Eagerly initialize the SFT table. For most implementations, it could be the same as update().
     /// However, we need this as a seprate method for SFTDenseChunkMap, as it needs to map side metadata first
@@ -44,7 +49,7 @@
     /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range.
     /// Otherwise, the caller should check with `has_sft_entry()` before calling this method.
     unsafe fn eager_initialize(
-        &self,
+        &mut self,
         space: *const (dyn SFT + Sync + 'static),
         start: Address,
         bytes: usize,
@@ -80,16 +85,75 @@
     }
 }
 
+/// The raw pointer for SFT. We expect a space to provide this to the SFT map.
+pub(crate) type SFTRawPointer = *const (dyn SFT + Sync + 'static);
+
+/// We store the raw pointer as a double word using atomics.
+/// We use portable_atomic. It provides non-locking atomic operations where possible,
+/// and uses a locking operation as the fallback.
+/// Rust only provides AtomicU128 on some platforms, and does not provide the type
+/// on x86_64-linux, as some early x86_64 CPUs do not have 128-bit atomic instructions.
+/// The crate portable_atomic works around the problem with runtime detection of
+/// whether 128-bit atomic instructions are available.
+#[cfg(target_pointer_width = "64")]
+type AtomicDoubleWord = portable_atomic::AtomicU128;
+#[cfg(target_pointer_width = "32")]
+type AtomicDoubleWord = portable_atomic::AtomicU64;
+
+/// The type we store the SFT raw pointer as. It is basically just a double-word-sized atomic integer.
+/// This type provides an abstraction so we can access the SFT easily.
+#[repr(transparent)]
+pub(crate) struct SFTRefStorage(AtomicDoubleWord);
+
+impl SFTRefStorage {
+    /// A check at boot time to ensure `SFTRefStorage` is correct.
+    pub fn pre_use_check() {
+        // If we do not have lock-free operations, warn the users.
+        if !AtomicDoubleWord::is_lock_free() {
+            warn!(
+                "SFT access word is not lock free on this platform. This will slow down SFT map."
+            );
+        }
+        // Our storage type needs to be the same width as the dyn pointer type.
+        assert_eq!(
+            std::mem::size_of::<AtomicDoubleWord>(),
+            std::mem::size_of::<SFTRawPointer>()
+        );
+    }
+
+    pub fn new(sft: SFTRawPointer) -> Self {
+        let val = unsafe { std::mem::transmute(sft) };
+        Self(AtomicDoubleWord::new(val))
+    }
+
+    // Load with the acquire ordering.
+    pub fn load(&self) -> &dyn SFT {
+        let val = self.0.load(Ordering::Acquire);
+        unsafe { std::mem::transmute(val) }
+    }
+
+    // Store a raw SFT pointer with the release ordering.
+    pub fn store(&self, sft: SFTRawPointer) {
+        let val = unsafe { std::mem::transmute(sft) };
+        self.0.store(val, Ordering::Release)
+    }
+}
+
+impl std::default::Default for SFTRefStorage {
+    fn default() -> Self {
+        Self::new(&EMPTY_SPACE_SFT as SFTRawPointer)
+    }
+}
+
 #[allow(dead_code)]
 #[cfg(target_pointer_width = "64")] // This impl only works for 64 bits: 1. the mask is designed for our 64bit heap range, 2. on 64bits, all our spaces are contiguous.
 mod space_map {
     use super::*;
     use crate::util::heap::layout::vm_layout::vm_layout;
-    use std::cell::UnsafeCell;
 
     /// Space map is a small table, and it has one entry for each MMTk space.
     pub struct SFTSpaceMap {
-        sft: UnsafeCell<Vec<*const (dyn SFT + Sync + 'static)>>,
+        sft: Vec<SFTRefStorage>,
     }
 
     unsafe impl Sync for SFTSpaceMap {}
@@ -107,12 +171,13 @@
         fn get_checked(&self, address: Address) -> &dyn SFT {
             // We should be able to map the entire address range to indices in the table.
-            debug_assert!(Self::addr_to_index(address) < unsafe { (*self.sft.get()).len() });
-            unsafe { &**(*self.sft.get()).get_unchecked(Self::addr_to_index(address)) }
+            debug_assert!(Self::addr_to_index(address) < self.sft.len());
+            unsafe { self.get_unchecked(address) }
         }
 
         unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT {
-            &**(*self.sft.get()).get_unchecked(Self::addr_to_index(address))
+            let cell = unsafe { self.sft.get_unchecked(Self::addr_to_index(address)) };
+            cell.load()
         }
 
         unsafe fn update(
@@ -125,7 +190,7 @@
             let index = Self::addr_to_index(start);
             if cfg!(debug_assertions) {
                 // Make sure we only update from empty to a valid space, or overwrite the space
-                let old = (*self.sft.get())[index];
+                let old = self.sft[index].load();
                 assert!((*old).name() == EMPTY_SFT_NAME || (*old).name() == (*space).name());
                 // Make sure the range is in the space
                 let space_start = Self::index_to_space_start(index);
@@ -138,12 +203,12 @@
                 }
             }
 
-            *(*self.sft.get()).get_unchecked_mut(index) = std::mem::transmute(space);
+            self.sft.get_unchecked(index).store(space);
         }
 
         unsafe fn clear(&self, addr: Address) {
             let index = Self::addr_to_index(addr);
-            *(*self.sft.get()).get_unchecked_mut(index) = &EMPTY_SPACE_SFT;
+            self.sft.get_unchecked(index).store(&EMPTY_SPACE_SFT as _);
         }
     }
@@ -154,7 +219,9 @@
         pub fn new() -> Self {
             let table_size = Self::addr_to_index(Address::MAX) + 1;
             debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES);
             Self {
-                sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; table_size]),
+                sft: std::iter::repeat_with(SFTRefStorage::default)
+                    .take(table_size)
+                    .collect(),
             }
         }
@@ -224,7 +291,6 @@ mod dense_chunk_map {
     use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
     use crate::util::metadata::side_metadata::spec_defs::SFT_DENSE_CHUNK_MAP_INDEX;
     use crate::util::metadata::side_metadata::*;
-    use std::cell::UnsafeCell;
     use std::collections::HashMap;
     use std::sync::atomic::Ordering;
 
@@ -239,10 +305,10 @@ pub struct SFTDenseChunkMap {
         /// The dense table, one entry per space. We use side metadata to store the space index for each chunk.
         /// 0 is EMPTY_SPACE_SFT.
-        sft: UnsafeCell<Vec<*const (dyn SFT + Sync + 'static)>>,
+        sft: Vec<SFTRefStorage>,
         /// A map from space name (assuming they are unique) to their index. We use this to know whether we have
         /// pushed &dyn SFT for a space, and to know its index.
-        index_map: UnsafeCell<HashMap<String, usize>>,
+        index_map: HashMap<String, usize>,
     }
 
     unsafe impl Sync for SFTDenseChunkMap {}
@@ -251,7 +317,7 @@
         fn has_sft_entry(&self, addr: Address) -> bool {
             if SFT_DENSE_CHUNK_MAP_INDEX.is_mapped(addr) {
                 let index = Self::addr_to_index(addr);
-                index < self.sft().len() as u8
+                index < self.sft.len() as u8
             } else {
                 // We haven't mapped side metadata for the chunk, so we do not have an SFT entry for the address.
                 false
             }
         }
@@ -264,28 +330,33 @@
         fn get_checked(&self, address: Address) -> &dyn SFT {
             if self.has_sft_entry(address) {
-                unsafe {
-                    &**self
-                        .sft()
-                        .get_unchecked(Self::addr_to_index(address) as usize)
-                }
+                unsafe { self.get_unchecked(address) }
             } else {
                 &EMPTY_SPACE_SFT
             }
         }
 
         unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT {
-            &**self
-                .sft()
-                .get_unchecked(Self::addr_to_index(address) as usize)
+            let cell = self
+                .sft
+                .get_unchecked(Self::addr_to_index(address) as usize);
+            cell.load()
         }
 
-        unsafe fn eager_initialize(
-            &self,
-            space: *const (dyn SFT + Sync + 'static),
-            start: Address,
-            bytes: usize,
-        ) {
+        fn notify_space_creation(&mut self, space: SFTRawPointer) {
+            // Insert the space into the SFT table, and the SFT map.
+
+            let space_name = unsafe { &*space }.name().to_string();
+            // We shouldn't have this space in our map yet. Otherwise, this method is called multiple times for the same space.
+            assert!(self.index_map.get(&space_name).is_none());
+            // Index for the space
+            let index = self.sft.len();
+            // Insert into the hash map and the vec
+            self.sft.push(SFTRefStorage::new(space));
+            self.index_map.insert(space_name, index);
+        }
+
+        unsafe fn eager_initialize(&mut self, space: SFTRawPointer, start: Address, bytes: usize) {
             let context = SideMetadataContext {
                 global: vec![SFT_DENSE_CHUNK_MAP_INDEX],
                 local: vec![],
@@ -303,15 +374,7 @@
             start: Address,
             bytes: usize,
         ) {
-            // Check if we have an entry in self.sft for the space. If so, get the index.
-            // If not, push the space pointer to the table and add an entry to the hahs map.
-            let index: u8 = *(*self.index_map.get())
-                .entry((*space).name().to_string())
-                .or_insert_with(|| {
-                    let count = self.sft().len();
-                    (*self.sft.get()).push(space);
-                    count
-                }) as u8;
+            let index: u8 = *self.index_map.get((*space).name()).unwrap() as u8;
 
             // Iterate through the chunks and record the space index in the side metadata.
             let first_chunk = conversions::chunk_align_down(start);
@@ -348,29 +411,19 @@
         pub fn new() -> Self {
             Self {
                 /// Empty space is at index 0
-                sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT]),
-                index_map: UnsafeCell::new(HashMap::new()),
+                sft: vec![SFTRefStorage::default()],
+                index_map: HashMap::new(),
             }
         }
 
         pub fn addr_to_index(addr: Address) -> u8 {
             SFT_DENSE_CHUNK_MAP_INDEX.load_atomic::<u8>(addr, Ordering::Relaxed)
         }
-
-        fn sft(&self) -> &Vec<*const (dyn SFT + Sync + 'static)> {
-            unsafe { &*self.sft.get() }
-        }
-
-        fn index_map(&self) -> &HashMap<String, usize> {
-            unsafe { &*self.index_map.get() }
-        }
     }
 }
 
 #[allow(dead_code)]
 mod sparse_chunk_map {
-    use std::cell::UnsafeCell;
-
     use super::*;
     use crate::util::conversions;
     use crate::util::conversions::*;
@@ -379,7 +432,7 @@
     /// The chunk map is a sparse table. It has one entry for each chunk in the address space we may use.
     pub struct SFTSparseChunkMap {
-        sft: UnsafeCell<Vec<*const (dyn SFT + Sync + 'static)>>,
+        sft: Vec<SFTRefStorage>,
     }
 
     unsafe impl Sync for SFTSparseChunkMap {}
@@ -395,14 +448,15 @@
     fn get_checked(&self, address: Address) -> &dyn SFT {
         if self.has_sft_entry(address) {
-            unsafe { &**(*self.sft.get()).get_unchecked(address.chunk_index()) }
+            unsafe { self.get_unchecked(address) }
         } else {
             &EMPTY_SPACE_SFT
         }
     }
 
     unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT {
-        &**(*self.sft.get()).get_unchecked(address.chunk_index())
+        let cell = self.sft.get_unchecked(address.chunk_index());
+        cell.load()
     }
 
     /// Update SFT map for the given address range.
@@ -446,7 +500,9 @@
     impl SFTSparseChunkMap {
         pub fn new() -> Self {
             SFTSparseChunkMap {
-                sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; vm_layout().max_chunks()]),
+                sft: std::iter::repeat_with(SFTRefStorage::default)
+                    .take(vm_layout().max_chunks())
+                    .collect(),
             }
         }
@@ -470,25 +526,21 @@
             let mut res = String::new();
 
             const SPACE_PER_LINE: usize = 10;
-            unsafe {
-                for i in (0..(*self.sft.get()).len()).step_by(SPACE_PER_LINE) {
-                    let max = if i + SPACE_PER_LINE > (*self.sft.get()).len() {
-                        (*self.sft.get()).len()
-                    } else {
-                        i + SPACE_PER_LINE
-                    };
-                    let chunks: Vec<usize> = (i..max).collect();
-                    let space_names: Vec<&str> = chunks
-                        .iter()
-                        .map(|&x| (*(*self.sft.get())[x]).name())
-                        .collect();
-                    res.push_str(&format!(
-                        "{}: {}",
-                        chunk_index_to_address(i),
-                        space_names.join(",")
-                    ));
-                    res.push('\n');
-                }
+            for i in (0..self.sft.len()).step_by(SPACE_PER_LINE) {
+                let max = if i + SPACE_PER_LINE > self.sft.len() {
+                    self.sft.len()
+                } else {
+                    i + SPACE_PER_LINE
+                };
+                let chunks: Vec<usize> = (i..max).collect();
+                let space_names: Vec<&str> =
+                    chunks.iter().map(|&x| self.sft[x].load().name()).collect();
+                res.push_str(&format!(
+                    "{}: {}",
+                    chunk_index_to_address(i),
+                    space_names.join(",")
+                ));
+                res.push('\n');
             }
 
             res
@@ -507,7 +559,7 @@
             // It is okay to set empty to valid, or set valid to empty. It is wrong if we overwrite a valid value with another valid value.
             if cfg!(debug_assertions) {
-                let old = unsafe { (*(*self.sft.get())[chunk]).name() };
+                let old = self.sft[chunk].load().name();
                 let new = sft.name();
                 // Allow overwriting the same SFT pointer. E.g., if we have set SFT map for a space, then ensure_mapped() is called on the same,
                 // in which case, we still set SFT map again.
@@ -520,7 +572,7 @@
                     new
                 );
             }
-            unsafe { *(*self.sft.get()).get_unchecked_mut(chunk) = sft };
+            unsafe { self.sft.get_unchecked(chunk).store(sft) };
         }
     }
 }
diff --git a/src/policy/space.rs b/src/policy/space.rs
index f8004fba41..5a1cda431b 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -29,6 +29,7 @@ use crate::util::heap::space_descriptor::SpaceDescriptor;
 use crate::util::heap::HeapMeta;
 use crate::util::memory;
 use crate::vm::VMBinding;
+
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::sync::Mutex;
@@ -43,7 +44,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
     /// Initialize entires in SFT map for the space. This is called when the Space object
     /// has a non-moving address, as we will use the address to set sft.
     /// Currently after we create a boxed plan, spaces in the plan have a non-moving address.
-    fn initialize_sft(&self);
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);
 
     /// A check for the obvious out-of-memory case: if the requested size is larger than
     /// the heap size, it is definitely an OOM. We would like to identify that, and
@@ -593,16 +594,19 @@ impl<VM: VMBinding> CommonSpace<VM> {
         rtn
     }
 
-    pub fn initialize_sft(&self, sft: &(dyn SFT + Sync + 'static)) {
-        // For contiguous space, we eagerly initialize SFT map based on its address range.
+    pub fn initialize_sft(
+        &self,
+        sft: &(dyn SFT + Sync + 'static),
+        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
+    ) {
+        // We have to keep this for now: if a space is contiguous, our page resource will NOT consider newly allocated chunks
+        // as new chunks (new_chunks = true). In that case, in grow_space(), we do not set SFT when new_chunks = false.
+        // We can fix this by either of these:
+        // * fix page resource, so it properly returns new_chunk
+        // * change grow_space() so it sets SFT no matter what the new_chunks value is.
+        // FIXME: eagerly initializing SFT is not a good idea.
         if self.contiguous {
-            // We have to keep this for now: if a space is contiguous, our page resource will NOT consider newly allocated chunks
-            // as new chunks (new_chunks = true). In that case, in grow_space(), we do not set SFT when new_chunks = false.
-            // We can fix this by either of these:
-            // * fix page resource, so it propelry returns new_chunk
-            // * change grow_space() so it sets SFT no matter what the new_chunks value is.
-            // FIXME: eagerly initializing SFT is not a good idea.
-            unsafe { SFT_MAP.eager_initialize(sft, self.start, self.extent) };
+            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
         }
     }
diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index e2964c52c9..7aa1eb9903 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -91,20 +91,27 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
         &self.common
     }
 
-    fn initialize_sft(&self) {
+    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
         // Initialize sft for current external pages. This method is called at the end of plan creation.
         // So we only set SFT for VM regions that are set by options (we skipped sft initialization for them earlier).
-        for external_pages in self.pr.get_external_pages().iter() {
+        let vm_regions = self.pr.get_external_pages();
+        // We should have at most one region at this point (set by the option). If we allow setting multiple VM spaces through options,
+        // we can remove this assertion.
+        assert!(vm_regions.len() <= 1);
+        for external_pages in vm_regions.iter() {
             // Chunk align things.
             let start = external_pages.start.align_down(BYTES_IN_CHUNK);
             let size = external_pages.end.align_up(BYTES_IN_CHUNK) - start;
             // The region should be empty in SFT map -- if they were set before this point, there could be invalid SFT pointers.
             debug_assert_eq!(
-                SFT_MAP.get_checked(start).name(),
+                sft_map.get_checked(start).name(),
                 crate::policy::sft::EMPTY_SFT_NAME
             );
             // Set SFT
-            self.set_sft_for_vm_region(start, size);
+            assert!(sft_map.has_sft_entry(start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", start);
+            unsafe {
+                sft_map.eager_initialize(self.as_sft(), start, size);
+            }
         }
     }
 
@@ -218,7 +225,10 @@
         // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);
         // Set SFT if we should
         if set_sft {
-            self.set_sft_for_vm_region(chunk_start, chunk_size);
+            assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
+            unsafe {
+                SFT_MAP.update(self.as_sft(), chunk_start, chunk_size);
+            }
         }
 
         self.pr.add_new_external_pages(ExternalPages {
@@ -227,18 +237,6 @@
         });
     }
 
-    fn set_sft_for_vm_region(&self, chunk_start: Address, chunk_size: usize) {
-        assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
-        assert!(crate::util::conversions::raw_is_aligned(
-            chunk_size,
-            BYTES_IN_CHUNK
-        ));
-        assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
-        unsafe {
-            SFT_MAP.eager_initialize(self.as_sft(), chunk_start, chunk_size);
-        }
-    }
-
     pub fn prepare(&mut self) {
         self.mark_state.on_global_prepare::<VM>();
         for external_pages in self.pr.get_external_pages().iter() {
diff --git a/src/util/rust_util/mod.rs b/src/util/rust_util/mod.rs
index b60161f507..e3596ff4a1 100644
--- a/src/util/rust_util/mod.rs
+++ b/src/util/rust_util/mod.rs
@@ -78,6 +78,19 @@ impl<T> InitializeOnce<T> {
         debug_assert!(self.once.is_completed());
         unsafe { (*self.v.get()).assume_init_ref() }
     }
+
+    /// Get a mutable reference to the value.
+    /// This is currently only used for SFTMap during plan creation (single threaded),
+    /// and before plan creation is done, the binding cannot use MMTk at all.
+    ///
+    /// # Safety
+    /// The caller needs to make sure there is no race when mutating the value.
+    #[allow(clippy::mut_from_ref)]
+    pub unsafe fn get_mut(&self) -> &mut T {
+        // We only assert in debug builds.
+        debug_assert!(self.once.is_completed());
+        unsafe { (*self.v.get()).assume_init_mut() }
+    }
 }
 
 impl<T> std::ops::Deref for InitializeOnce<T> {
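For readers unfamiliar with the trick `SFTRefStorage` relies on above: a trait-object pointer is a fat pointer (data pointer plus vtable pointer), so it can be stored in a single double-word atomic integer. The standalone sketch below is illustrative only and is not part of the patch; the `Policy`, `PolicyRef`, and `Empty` names are invented for the example, but the transmute-and-store technique mirrors `SFTRefStorage` on a 64-bit target using portable_atomic.

```rust
// Illustrative sketch only: `Policy`, `PolicyRef`, and `Empty` are made-up names,
// but the storage technique mirrors SFTRefStorage on a 64-bit target.
use portable_atomic::AtomicU128;
use std::sync::atomic::Ordering;

trait Policy: Sync {
    fn name(&self) -> &'static str;
}

struct Empty;
impl Policy for Empty {
    fn name(&self) -> &'static str {
        "empty"
    }
}

// A trait-object pointer is a fat pointer: data pointer + vtable pointer = two words.
type RawPolicyPtr = *const (dyn Policy + Sync + 'static);

#[repr(transparent)]
struct PolicyRef(AtomicU128);

impl PolicyRef {
    fn new(p: RawPolicyPtr) -> Self {
        // On 64-bit targets both the fat pointer and u128 are 16 bytes wide.
        assert_eq!(std::mem::size_of::<RawPolicyPtr>(), std::mem::size_of::<u128>());
        Self(AtomicU128::new(unsafe { std::mem::transmute(p) }))
    }

    fn load(&self) -> &dyn Policy {
        // Acquire pairs with the Release store below.
        let bits = self.0.load(Ordering::Acquire);
        unsafe { std::mem::transmute(bits) }
    }

    fn store(&self, p: RawPolicyPtr) {
        self.0.store(unsafe { std::mem::transmute(p) }, Ordering::Release);
    }
}

static EMPTY: Empty = Empty;

fn main() {
    // portable_atomic falls back to a lock-based implementation when the CPU
    // lacks 128-bit atomic instructions; is_lock_free() reports which path is in use.
    println!("128-bit atomics lock free: {}", AtomicU128::is_lock_free());

    let cell = PolicyRef::new(&EMPTY as RawPolicyPtr);
    assert_eq!(cell.load().name(), "empty");
    cell.store(&EMPTY as RawPolicyPtr);
}
```

The acquire load and release store pair a writer publishing a new space pointer with concurrent readers, which is the same reasoning the patch applies to `SFTRefStorage::load` and `SFTRefStorage::store`.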