diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1881e7bb..4823eecd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,11 +5,17 @@
 ### Added

 - \[[#311](https://github.com/rust-vmm/vm-memory/pull/311)\] Allow compiling without the ReadVolatile and WriteVolatile implementations
+- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] `GuestRegionCollection`, a generic container of `GuestMemoryRegion`s, generalizing `GuestMemoryMmap` (which
+  is now a type alias for `GuestRegionCollection<GuestRegionMmap<B>>`)

 ### Changed

 - \[[#307](https://github.com/rust-vmm/vm-memory/pull/307)\] Move `read_volatile_from`, `read_exact_volatile_from`, `write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait.
+- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] Give `GuestMemory::find_region` a default implementation,
+  based on linear search.
+- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] Implement `Bytes<MemoryRegionAddress>` generically
+  for all `R: GuestMemoryRegionBytes`.

 ### Removed
diff --git a/src/atomic.rs b/src/atomic.rs
index 4b20b2c4..22697d05 100644
--- a/src/atomic.rs
+++ b/src/atomic.rs
@@ -140,14 +140,12 @@ impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
 }

 #[cfg(test)]
-#[cfg(feature = "backend-mmap")]
 mod tests {
     use super::*;
-    use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion};
+    use crate::region::tests::{new_guest_memory_collection_from_regions, Collection, MockRegion};
+    use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize};

-    type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
-    type GuestRegionMmap = crate::GuestRegionMmap<()>;
-    type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>;
+    type GuestMemoryMmapAtomic = GuestMemoryAtomic<Collection>;

     #[test]
     fn test_atomic_memory() {
@@ -157,7 +155,7 @@ mod tests {
             (GuestAddress(0x1000), region_size),
         ];
         let mut iterated_regions = Vec::new();
-        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
+        let gmm = new_guest_memory_collection_from_regions(&regions).unwrap();
         let gm = GuestMemoryMmapAtomic::new(gmm);
         let mem = gm.memory();

@@ -166,7 +164,7 @@ mod tests {
         }

         for region in mem.iter() {
-            iterated_regions.push((region.start_addr(), region.len() as usize));
+            iterated_regions.push((region.start_addr(), region.len()));
         }
         assert_eq!(regions, iterated_regions);
         assert_eq!(mem.num_regions(), 2);
@@ -207,7 +205,7 @@ mod tests {
             (GuestAddress(0x0), region_size),
             (GuestAddress(0x1000), region_size),
         ];
-        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
+        let gmm = new_guest_memory_collection_from_regions(&regions).unwrap();
         let gm = GuestMemoryMmapAtomic::new(gmm);
         let mem = {
             let guard1 = gm.memory();
@@ -219,11 +217,11 @@ mod tests {
     #[test]
     fn test_atomic_hotplug() {
         let region_size = 0x1000;
-        let regions = vec![
+        let regions = [
            (GuestAddress(0x0), region_size),
             (GuestAddress(0x10_0000), region_size),
         ];
-        let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
+        let mut gmm = Arc::new(new_guest_memory_collection_from_regions(&regions).unwrap());
         let gm: GuestMemoryAtomic<_> = gmm.clone().into();
         let mem_orig = gm.memory();
         assert_eq!(mem_orig.num_regions(), 2);
@@ -231,26 +229,32 @@ mod tests {
         {
             let guard = gm.lock().unwrap();
             let new_gmm = Arc::make_mut(&mut gmm);
-            let mmap = Arc::new(
-                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000))
-                    .unwrap(),
-            );
-            let new_gmm = new_gmm.insert_region(mmap).unwrap();
-            let mmap = Arc::new(
-                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000))
-                    .unwrap(),
-            );
-            let new_gmm = new_gmm.insert_region(mmap).unwrap();
-            let mmap = Arc::new(
-                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
-                    .unwrap(),
-            );
-            let new_gmm = new_gmm.insert_region(mmap).unwrap();
-            let mmap = Arc::new(
-                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
-                    .unwrap(),
-            );
-            new_gmm.insert_region(mmap).unwrap_err();
+            let new_gmm = new_gmm
+                .insert_region(Arc::new(MockRegion {
+                    start: GuestAddress(0x8000),
+                    len: 0x1000,
+                }))
+                .unwrap();
+            let new_gmm = new_gmm
+                .insert_region(Arc::new(MockRegion {
+                    start: GuestAddress(0x4000),
+                    len: 0x1000,
+                }))
+                .unwrap();
+            let new_gmm = new_gmm
+                .insert_region(Arc::new(MockRegion {
+                    start: GuestAddress(0xc000),
+                    len: 0x1000,
+                }))
+                .unwrap();
+
+            new_gmm
+                .insert_region(Arc::new(MockRegion {
+                    start: GuestAddress(0x8000),
+                    len: 0x1000,
+                }))
+                .unwrap_err();
+
             guard.replace(new_gmm);
         }
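The atomic tests above now exercise hotplug purely through the mock region collection. For downstream users, the same lock-and-replace pattern with the real mmap backend looks roughly like the sketch below. This is commentary, not part of the patch; it assumes the `backend-mmap` and `backend-atomic` features and the `GuestRegionMmap::from_range` constructor used elsewhere in this diff:

```rust
use std::sync::Arc;

use vm_memory::{GuestAddress, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap, GuestRegionMmap};

fn main() {
    // Start with a single 4 KiB region at guest physical address 0.
    let initial = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    let atomic = GuestMemoryAtomic::new(initial);

    // Readers take a cheap snapshot that stays valid across hotplug.
    let snapshot = atomic.memory();
    assert_eq!(snapshot.num_regions(), 1);

    // Hotplug: build a grown collection and swap it in under the lock.
    let region = Arc::new(
        GuestRegionMmap::<()>::from_range(GuestAddress(0x10_0000), 0x1000, None).unwrap(),
    );
    let grown = atomic.memory().insert_region(region).unwrap();
    atomic.lock().unwrap().replace(grown);

    assert_eq!(snapshot.num_regions(), 1); // the old snapshot is unaffected
    assert_eq!(atomic.memory().num_regions(), 2);
}
```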
diff --git a/src/guest_memory.rs b/src/guest_memory.rs
index 189ea8dd..3d018e04 100644
--- a/src/guest_memory.rs
+++ b/src/guest_memory.rs
@@ -50,10 +50,11 @@ use std::sync::atomic::Ordering;
 use std::sync::Arc;

 use crate::address::{Address, AddressValue};
-use crate::bitmap::{Bitmap, BS, MS};
+use crate::bitmap::MS;
 use crate::bytes::{AtomicAccess, Bytes};
 use crate::io::{ReadVolatile, WriteVolatile};
 use crate::volatile_memory::{self, VolatileSlice};
+use crate::GuestMemoryRegion;

 /// Errors associated with handling guest memory accesses.
 #[allow(missing_docs)]
@@ -158,139 +159,6 @@ impl FileOffset {
     }
 }

-/// Represents a continuous region of guest physical memory.
-#[allow(clippy::len_without_is_empty)]
-pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
-    /// Type used for dirty memory tracking.
-    type B: Bitmap;
-
-    /// Returns the size of the region.
-    fn len(&self) -> GuestUsize;
-
-    /// Returns the minimum (inclusive) address managed by the region.
-    fn start_addr(&self) -> GuestAddress;
-
-    /// Returns the maximum (inclusive) address managed by the region.
-    fn last_addr(&self) -> GuestAddress {
-        // unchecked_add is safe as the region bounds were checked when it was created.
-        self.start_addr().unchecked_add(self.len() - 1)
-    }
-
-    /// Borrow the associated `Bitmap` object.
-    fn bitmap(&self) -> &Self::B;
-
-    /// Returns the given address if it is within this region.
-    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
-        if self.address_in_range(addr) {
-            Some(addr)
-        } else {
-            None
-        }
-    }
-
-    /// Returns `true` if the given address is within this region.
-    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
-        addr.raw_value() < self.len()
-    }
-
-    /// Returns the address plus the offset if it is in this region.
-    fn checked_offset(
-        &self,
-        base: MemoryRegionAddress,
-        offset: usize,
-    ) -> Option<MemoryRegionAddress> {
-        base.checked_add(offset as u64)
-            .and_then(|addr| self.check_address(addr))
-    }
-
-    /// Tries to convert an absolute address to a relative address within this region.
-    ///
-    /// Returns `None` if `addr` is out of the bounds of this region.
-    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
-        addr.checked_offset_from(self.start_addr())
-            .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
-    }
-
-    /// Returns the host virtual address corresponding to the region address.
-    ///
-    /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
-    /// have the capability to mmap guest address range into host virtual address space for
-    /// direct access, so the corresponding host virtual address may be passed to other subsystems.
-    ///
-    /// # Note
-    /// The underlying guest memory is not protected from memory aliasing, which breaks the
-    /// Rust memory safety model. It's the caller's responsibility to ensure that there's no
-    /// concurrent accesses to the underlying guest memory.
-    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
-        Err(Error::HostAddressNotAvailable)
-    }
-
-    /// Returns information regarding the file and offset backing this memory region.
-    fn file_offset(&self) -> Option<&FileOffset> {
-        None
-    }
-
-    /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
-    /// `offset`.
-    #[allow(unused_variables)]
-    fn get_slice(
-        &self,
-        offset: MemoryRegionAddress,
-        count: usize,
-    ) -> Result<VolatileSlice<BS<Self::B>>> {
-        Err(Error::HostAddressNotAvailable)
-    }
-
-    /// Gets a slice of memory for the entire region that supports volatile access.
-    ///
-    /// # Examples (uses the `backend-mmap` feature)
-    ///
-    /// ```
-    /// # #[cfg(feature = "backend-mmap")]
-    /// # {
-    /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
-    /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
-    /// #
-    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
-    ///     .expect("Could not create guest memory");
-    /// let slice = region
-    ///     .as_volatile_slice()
-    ///     .expect("Could not get volatile slice");
-    ///
-    /// let v = 42u32;
-    /// let r = slice
-    ///     .get_ref::<u32>(0x200)
-    ///     .expect("Could not get reference");
-    /// r.store(v);
-    /// assert_eq!(r.load(), v);
-    /// # }
-    /// ```
-    fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
-        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
-    }
-
-    /// Show if the region is based on the `HugeTLBFS`.
-    /// Returns Some(true) if the region is backed by hugetlbfs.
-    /// None represents that no information is available.
-    ///
-    /// # Examples (uses the `backend-mmap` feature)
-    ///
-    /// ```
-    /// # #[cfg(feature = "backend-mmap")]
-    /// # {
-    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
-    /// let addr = GuestAddress(0x1000);
-    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
-    /// let r = mem.find_region(addr).unwrap();
-    /// assert_eq!(r.is_hugetlbfs(), None);
-    /// # }
-    /// ```
-    #[cfg(target_os = "linux")]
-    fn is_hugetlbfs(&self) -> Option<bool> {
-        None
-    }
-}
-
 /// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object.
 /// The vm-memory crate already provides trivial implementation for
 /// references to `GuestMemory` or reference-counted `GuestMemory` objects,
@@ -408,7 +276,10 @@ pub trait GuestMemory {
     fn num_regions(&self) -> usize;

     /// Returns the region containing the specified address or `None`.
-    fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
+    fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
+        self.iter()
+            .find(|region| addr >= region.start_addr() && addr <= region.last_addr())
+    }

     /// Gets an iterator over the entries in the collection.
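The new default `find_region` treats region bounds as inclusive of `last_addr()`, the same semantics the binary-search override in `GuestRegionCollection` implements. A small sketch of the boundary behavior (commentary, not part of the patch; requires the `backend-mmap` feature):

```rust
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x0), 0x400),
        (GuestAddress(0x1000), 0x400),
    ])
    .unwrap();

    // Both the linear-search default and the binary-search override agree here.
    assert!(mem.find_region(GuestAddress(0x3ff)).is_some()); // last byte of region 0
    assert!(mem.find_region(GuestAddress(0x400)).is_none()); // hole between regions
    assert!(mem.find_region(GuestAddress(0x1000)).is_some()); // first byte of region 1
}
```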
     ///
diff --git a/src/lib.rs b/src/lib.rs
index b8fe5f40..d89f8459 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -47,9 +47,12 @@ pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};
 pub mod guest_memory;
 pub use guest_memory::{
     Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory,
-    GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
+    GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
 };

+pub mod region;
+pub use region::{GuestMemoryRegion, GuestRegionCollection, GuestRegionError as Error};
+
 pub mod io;
 pub use io::{ReadVolatile, WriteVolatile};

@@ -57,7 +60,7 @@ pub use io::{ReadVolatile, WriteVolatile};
 pub mod mmap;

 #[cfg(feature = "backend-mmap")]
-pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
+pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapRegion};

 #[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))]
 pub use mmap::{MmapRange, MmapXenFlags};
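One consequence of the re-export shuffle above: `vm_memory::Error` keeps compiling for existing code, but it now names the backend-agnostic `GuestRegionError` and no longer requires the `backend-mmap` feature. A hedged sketch of what stays source-compatible (the mmap-specific variants are gated per-variant now, as the new enum below shows):

```rust
// `Error` is now an alias for `region::GuestRegionError`; the mmap-specific
// variants (`InvalidGuestRegion`, `MmapRegion`) still exist, but only when
// the `backend-mmap` feature is enabled.
use vm_memory::{Error, GuestAddress, GuestMemoryMmap};

fn build() -> Result<GuestMemoryMmap<()>, Error> {
    // Unsorted ranges surface the same logical error as before,
    // just from the relocated enum.
    GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400), (GuestAddress(0x0), 0x400)])
}

fn main() {
    assert!(matches!(build().unwrap_err(), Error::UnsortedMemoryRegions));
}
```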
diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs
index 9f9f1939..99729fe3 100644
--- a/src/mmap/mod.rs
+++ b/src/mmap/mod.rs
@@ -17,16 +17,13 @@ use std::borrow::Borrow;
 use std::io::{Seek, SeekFrom};
 use std::ops::Deref;
 use std::result;
-use std::sync::atomic::Ordering;
-use std::sync::Arc;

 use crate::address::Address;
 use crate::bitmap::{Bitmap, BS};
-use crate::guest_memory::{
-    self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress,
-};
+use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress};
+use crate::region::{GuestMemoryRegion, GuestMemoryRegionBytes};
 use crate::volatile_memory::{VolatileMemory, VolatileSlice};
-use crate::{AtomicAccess, Bytes, ReadVolatile, WriteVolatile};
+use crate::{Error, GuestRegionCollection};

 // re-export for backward compat, as the trait used to be defined in mmap.rs
 pub use crate::bitmap::NewBitmap;
@@ -51,27 +48,6 @@ pub use std::io::Error as MmapRegionError;
 #[cfg(target_family = "windows")]
 pub use windows::MmapRegion;

-/// Errors that can occur when creating a memory map.
-#[derive(Debug, thiserror::Error)]
-pub enum Error {
-    /// Adding the guest base address to the length of the underlying mapping resulted
-    /// in an overflow.
-    #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")]
-    InvalidGuestRegion,
-    /// Error creating a `MmapRegion` object.
-    #[error("{0}")]
-    MmapRegion(MmapRegionError),
-    /// No memory region found.
-    #[error("No memory region found")]
-    NoMemoryRegion,
-    /// Some of the memory regions intersect with each other.
-    #[error("Some of the memory regions intersect with each other")]
-    MemoryRegionOverlap,
-    /// The provided memory regions haven't been sorted.
-    #[error("The provided memory regions haven't been sorted")]
-    UnsortedMemoryRegions,
-}
-
 // TODO: use this for Windows as well after we redefine the Error type there.
 #[cfg(target_family = "unix")]
 /// Checks if a mapping of `size` bytes fits at the provided `file_offset`.
@@ -168,154 +144,6 @@ impl<B: Bitmap> GuestRegionMmap<B> {
     }
 }

-impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionMmap<B> {
-    type E = guest_memory::Error;
-
-    /// # Examples
-    /// * Write a slice at guest address 0x1200.
-    ///
-    /// ```
-    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
-    /// #
-    /// # let start_addr = GuestAddress(0x1000);
-    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
-    /// #     .expect("Could not create guest memory");
-    /// #
-    /// let res = gm
-    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
-    ///     .expect("Could not write to guest memory");
-    /// assert_eq!(5, res);
-    /// ```
-    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
-        let maddr = addr.raw_value() as usize;
-        self.as_volatile_slice()
-            .unwrap()
-            .write(buf, maddr)
-            .map_err(Into::into)
-    }
-
-    /// # Examples
-    /// * Read a slice of length 16 at guestaddress 0x1200.
-    ///
-    /// ```
-    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
-    /// #
-    /// # let start_addr = GuestAddress(0x1000);
-    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
-    /// #     .expect("Could not create guest memory");
-    /// #
-    /// let buf = &mut [0u8; 16];
-    /// let res = gm
-    ///     .read(buf, GuestAddress(0x1200))
-    ///     .expect("Could not read from guest memory");
-    /// assert_eq!(16, res);
-    /// ```
-    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
-        let maddr = addr.raw_value() as usize;
-        self.as_volatile_slice()
-            .unwrap()
-            .read(buf, maddr)
-            .map_err(Into::into)
-    }
-
-    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
-        let maddr = addr.raw_value() as usize;
-        self.as_volatile_slice()
-            .unwrap()
-            .write_slice(buf, maddr)
-            .map_err(Into::into)
-    }
-
-    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
-        let maddr = addr.raw_value() as usize;
-        self.as_volatile_slice()
-            .unwrap()
-            .read_slice(buf, maddr)
-            .map_err(Into::into)
-    }
-
-    fn read_volatile_from<F>(
-        &self,
-        addr: MemoryRegionAddress,
-        src: &mut F,
-        count: usize,
-    ) -> Result<usize, Self::E>
-    where
-        F: ReadVolatile,
-    {
-        self.as_volatile_slice()
-            .unwrap()
-            .read_volatile_from(addr.0 as usize, src, count)
-            .map_err(Into::into)
-    }
-
-    fn read_exact_volatile_from<F>(
-        &self,
-        addr: MemoryRegionAddress,
-        src: &mut F,
-        count: usize,
-    ) -> Result<(), Self::E>
-    where
-        F: ReadVolatile,
-    {
-        self.as_volatile_slice()
-            .unwrap()
-            .read_exact_volatile_from(addr.0 as usize, src, count)
-            .map_err(Into::into)
-    }
-
-    fn write_volatile_to<F>(
-        &self,
-        addr: MemoryRegionAddress,
-        dst: &mut F,
-        count: usize,
-    ) -> Result<usize, Self::E>
-    where
-        F: WriteVolatile,
-    {
-        self.as_volatile_slice()
-            .unwrap()
-            .write_volatile_to(addr.0 as usize, dst, count)
-            .map_err(Into::into)
-    }
-
-    fn write_all_volatile_to<F>(
-        &self,
-        addr: MemoryRegionAddress,
-        dst: &mut F,
-        count: usize,
-    ) -> Result<(), Self::E>
-    where
-        F: WriteVolatile,
-    {
-        self.as_volatile_slice()
-            .unwrap()
-            .write_all_volatile_to(addr.0 as usize, dst, count)
-            .map_err(Into::into)
-    }
-
-    fn store<T: AtomicAccess>(
-        &self,
-        val: T,
-        addr: MemoryRegionAddress,
-        order: Ordering,
-    ) -> guest_memory::Result<()> {
-        self.as_volatile_slice().and_then(|s| {
-            s.store(val, addr.raw_value() as usize, order)
-                .map_err(Into::into)
-        })
-    }
-
-    fn load<T: AtomicAccess>(
-        &self,
-        addr: MemoryRegionAddress,
-        order: Ordering,
-    ) -> guest_memory::Result<T> {
-        self.as_volatile_slice()
-            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
-    }
-}
-
 impl<B: Bitmap> GuestMemoryRegion for GuestRegionMmap<B> {
     type B = B;

@@ -362,23 +190,17 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionMmap<B> {
     }
 }

+impl<B: Bitmap> GuestMemoryRegionBytes for GuestRegionMmap<B> {}
+
 /// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory
 /// in the current process.
 ///
 /// Represents the entire physical memory of the guest by tracking all its memory regions.
 /// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the
 /// virtual address space of the calling process.
-#[derive(Clone, Debug, Default)]
-pub struct GuestMemoryMmap<B = ()> {
-    regions: Vec<Arc<GuestRegionMmap<B>>>,
-}
+pub type GuestMemoryMmap<B = ()> = GuestRegionCollection<GuestRegionMmap<B>>;

 impl<B: NewBitmap> GuestMemoryMmap<B> {
-    /// Creates an empty `GuestMemoryMmap` instance.
-    pub fn new() -> Self {
-        Self::default()
-    }
-
     /// Creates a container and allocates anonymous memory for guest memory regions.
     ///
     /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address.
@@ -406,111 +228,6 @@ impl<B: NewBitmap> GuestMemoryMmap<B> {
     }
 }

-impl<B: Bitmap> GuestMemoryMmap<B> {
-    /// Creates a new `GuestMemoryMmap` from a vector of regions.
-    ///
-    /// # Arguments
-    ///
-    /// * `regions` - The vector of regions.
-    ///   The regions shouldn't overlap and they should be sorted
-    ///   by the starting address.
-    pub fn from_regions(mut regions: Vec<GuestRegionMmap<B>>) -> result::Result<Self, Error> {
-        Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
-    }
-
-    /// Creates a new `GuestMemoryMmap` from a vector of Arc regions.
-    ///
-    /// Similar to the constructor `from_regions()` as it returns a
-    /// `GuestMemoryMmap`. The need for this constructor is to provide a way for
-    /// consumer of this API to create a new `GuestMemoryMmap` based on existing
-    /// regions coming from an existing `GuestMemoryMmap` instance.
-    ///
-    /// # Arguments
-    ///
-    /// * `regions` - The vector of `Arc` regions.
-    ///   The regions shouldn't overlap and they should be sorted
-    ///   by the starting address.
-    pub fn from_arc_regions(regions: Vec<Arc<GuestRegionMmap<B>>>) -> result::Result<Self, Error> {
-        if regions.is_empty() {
-            return Err(Error::NoMemoryRegion);
-        }
-
-        for window in regions.windows(2) {
-            let prev = &window[0];
-            let next = &window[1];
-
-            if prev.start_addr() > next.start_addr() {
-                return Err(Error::UnsortedMemoryRegions);
-            }
-
-            if prev.last_addr() >= next.start_addr() {
-                return Err(Error::MemoryRegionOverlap);
-            }
-        }
-
-        Ok(Self { regions })
-    }
-
-    /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`.
-    ///
-    /// # Arguments
-    /// * `region`: the memory region to insert into the guest memory object.
-    pub fn insert_region(
-        &self,
-        region: Arc<GuestRegionMmap<B>>,
-    ) -> result::Result<GuestMemoryMmap<B>, Error> {
-        let mut regions = self.regions.clone();
-        regions.push(region);
-        regions.sort_by_key(|x| x.start_addr());
-
-        Self::from_arc_regions(regions)
-    }
-
-    /// Remove a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`
-    /// on success, together with the removed region.
-    ///
-    /// # Arguments
-    /// * `base`: base address of the region to be removed
-    /// * `size`: size of the region to be removed
-    pub fn remove_region(
-        &self,
-        base: GuestAddress,
-        size: GuestUsize,
-    ) -> result::Result<(GuestMemoryMmap<B>, Arc<GuestRegionMmap<B>>), Error> {
-        if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
-            if self.regions.get(region_index).unwrap().mapping.size() as GuestUsize == size {
-                let mut regions = self.regions.clone();
-                let region = regions.remove(region_index);
-                return Ok((Self { regions }, region));
-            }
-        }
-
-        Err(Error::InvalidGuestRegion)
-    }
-}
-
-impl<B: Bitmap> GuestMemory for GuestMemoryMmap<B> {
-    type R = GuestRegionMmap<B>;
-
-    fn num_regions(&self) -> usize {
-        self.regions.len()
-    }
-
-    fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap<B>> {
-        let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
-            Ok(x) => Some(x),
-            // Within the closest region with starting address < addr
-            Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
-            _ => None,
-        };
-        index.map(|x| self.regions[x].as_ref())
-    }
-
-    fn iter(&self) -> impl Iterator<Item = &Self::R> {
-        self.regions.iter().map(AsRef::as_ref)
-    }
-}
-
 #[cfg(test)]
 mod tests {
     #![allow(clippy::undocumented_unsafe_blocks)]
@@ -520,7 +237,7 @@ mod tests {

     use crate::bitmap::tests::test_guest_memory_and_region;
     use crate::bitmap::AtomicBitmap;
-    use crate::GuestAddressSpace;
+    use crate::{Bytes, GuestMemory, GuestMemoryError};

     use std::io::Write;
     use std::mem;
@@ -528,8 +245,8 @@ mod tests {
     use std::{fs::File, path::Path};
     use vmm_sys_util::tempfile::TempFile;

-    type GuestMemoryMmap = super::GuestMemoryMmap<()>;
     type GuestRegionMmap = super::GuestRegionMmap<()>;
+    type GuestMemoryMmap = super::GuestRegionCollection<GuestRegionMmap>;
     type MmapRegion = super::MmapRegion<()>;

     #[test]
@@ -538,235 +255,6 @@ mod tests {
         assert_eq!(1024, m.size());
     }

-    fn check_guest_memory_mmap(
-        maybe_guest_mem: Result<GuestMemoryMmap, Error>,
-        expected_regions_summary: &[(GuestAddress, usize)],
-    ) {
-        assert!(maybe_guest_mem.is_ok());
-
-        let guest_mem = maybe_guest_mem.unwrap();
-        assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
-        let maybe_last_mem_reg = expected_regions_summary.last();
-        if let Some((region_addr, region_size)) = maybe_last_mem_reg {
-            let mut last_addr = region_addr.unchecked_add(*region_size as u64);
-            if last_addr.raw_value() != 0 {
-                last_addr = last_addr.unchecked_sub(1);
-            }
-            assert_eq!(guest_mem.last_addr(), last_addr);
-        }
-        for ((region_addr, region_size), mmap) in expected_regions_summary
-            .iter()
-            .zip(guest_mem.regions.iter())
-        {
-            assert_eq!(region_addr, &mmap.guest_base);
-            assert_eq!(region_size, &mmap.mapping.size());
-
-            assert!(guest_mem.find_region(*region_addr).is_some());
-        }
-    }
-
-    fn new_guest_memory_mmap(
-        regions_summary: &[(GuestAddress, usize)],
-    ) -> Result<GuestMemoryMmap, Error> {
-        GuestMemoryMmap::from_ranges(regions_summary)
-    }
-
-    fn new_guest_memory_mmap_from_regions(
-        regions_summary: &[(GuestAddress, usize)],
-    ) -> Result<GuestMemoryMmap, Error> {
-        GuestMemoryMmap::from_regions(
-            regions_summary
-                .iter()
-                .map(|(region_addr, region_size)| {
-                    GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap()
-                })
-                .collect(),
-        )
-    }
-
-    fn new_guest_memory_mmap_from_arc_regions(
-        regions_summary: &[(GuestAddress, usize)],
-    ) -> Result<GuestMemoryMmap, Error> {
-        GuestMemoryMmap::from_arc_regions(
-            regions_summary
-                .iter()
-                .map(|(region_addr, region_size)| {
-                    Arc::new(GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap())
-                })
-                .collect(),
-        )
-    }
-
-    fn new_guest_memory_mmap_with_files(
-        regions_summary: &[(GuestAddress, usize)],
-    ) -> Result<GuestMemoryMmap, Error> {
-        let regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = regions_summary
-            .iter()
-            .map(|(region_addr, region_size)| {
-                let f = TempFile::new().unwrap().into_file();
-                f.set_len(*region_size as u64).unwrap();
-
-                (*region_addr, *region_size, Some(FileOffset::new(f, 0)))
-            })
-            .collect();
-
-        GuestMemoryMmap::from_ranges_with_files(&regions)
-    }
-
-    #[test]
-    fn test_no_memory_region() {
-        let regions_summary = [];
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap(&regions_summary).err().unwrap()
-            ),
-            format!("{:?}", Error::NoMemoryRegion)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_with_files(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::NoMemoryRegion)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_from_regions(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::NoMemoryRegion)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_from_arc_regions(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::NoMemoryRegion)
-        );
-    }
-
-    #[test]
-    fn test_overlapping_memory_regions() {
-        let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)];
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap(&regions_summary).err().unwrap()
-            ),
-            format!("{:?}", Error::MemoryRegionOverlap)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_with_files(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::MemoryRegionOverlap)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_from_regions(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::MemoryRegionOverlap)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_from_arc_regions(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::MemoryRegionOverlap)
-        );
-    }
-
-    #[test]
-    fn test_unsorted_memory_regions() {
-        let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)];
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap(&regions_summary).err().unwrap()
-            ),
-            format!("{:?}", Error::UnsortedMemoryRegions)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_with_files(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::UnsortedMemoryRegions)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_from_regions(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::UnsortedMemoryRegions)
-        );
-
-        assert_eq!(
-            format!(
-                "{:?}",
-                new_guest_memory_mmap_from_arc_regions(&regions_summary)
-                    .err()
-                    .unwrap()
-            ),
-            format!("{:?}", Error::UnsortedMemoryRegions)
-        );
-    }
-
-    #[test]
-    fn test_valid_memory_regions() {
-        let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)];
-
-        let guest_mem = GuestMemoryMmap::new();
-        assert_eq!(guest_mem.regions.len(), 0);
-
-        check_guest_memory_mmap(new_guest_memory_mmap(&regions_summary), &regions_summary);
-
-        check_guest_memory_mmap(
-            new_guest_memory_mmap_with_files(&regions_summary),
-            &regions_summary,
-        );
-
-        check_guest_memory_mmap(
-            new_guest_memory_mmap_from_regions(&regions_summary),
-            &regions_summary,
-        );
-
-        check_guest_memory_mmap(
-            new_guest_memory_mmap_from_arc_regions(&regions_summary),
-            &regions_summary,
-        );
-    }
-
     #[test]
     fn slice_addr() {
         let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap();
@@ -792,64 +280,6 @@ mod tests {
         assert_eq!(buf[0..sample_buf.len()], sample_buf[..]);
     }

-    #[test]
-    fn test_address_in_range() {
-        let f1 = TempFile::new().unwrap().into_file();
-        f1.set_len(0x400).unwrap();
-        let f2 = TempFile::new().unwrap().into_file();
-        f2.set_len(0x400).unwrap();
-
-        let start_addr1 = GuestAddress(0x0);
-        let start_addr2 = GuestAddress(0x800);
-        let guest_mem =
-            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
-        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
-            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
-            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
-        ])
-        .unwrap();
-
-        let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
-        for guest_mem in guest_mem_list.iter() {
-            assert!(guest_mem.address_in_range(GuestAddress(0x200)));
-            assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
-            assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
-            assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
-        }
-    }
-
-    #[test]
-    fn test_check_address() {
-        let f1 = TempFile::new().unwrap().into_file();
-        f1.set_len(0x400).unwrap();
-        let f2 = TempFile::new().unwrap().into_file();
-        f2.set_len(0x400).unwrap();
-
-        let start_addr1 = GuestAddress(0x0);
-        let start_addr2 = GuestAddress(0x800);
-        let guest_mem =
-            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
-        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
-            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
-            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
-        ])
-        .unwrap();
-
-        let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
-        for guest_mem in guest_mem_list.iter() {
-            assert_eq!(
-                guest_mem.check_address(GuestAddress(0x200)),
-                Some(GuestAddress(0x200))
-            );
-            assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
-            assert_eq!(
-                guest_mem.check_address(GuestAddress(0xa00)),
-                Some(GuestAddress(0xa00))
-            );
-            assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
-        }
-    }
-
     #[test]
     fn test_to_region_addr() {
         let f1 = TempFile::new().unwrap().into_file();
@@ -964,18 +394,13 @@ mod tests {
         for gm in gm_list.iter() {
             let val1: u64 = 0xaa55_aa55_aa55_aa55;
             let val2: u64 = 0x55aa_55aa_55aa_55aa;
-            assert_eq!(
-                format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
-                format!("InvalidGuestAddress({:?})", bad_addr,)
-            );
-            assert_eq!(
-                format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
-                format!(
-                    "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
-                    mem::size_of::<u64>(),
-                    max_addr.checked_offset_from(bad_addr2).unwrap()
-                )
-            );
+            assert!(matches!(
+                gm.write_obj(val1, bad_addr).unwrap_err(),
+                GuestMemoryError::InvalidGuestAddress(addr) if addr == bad_addr
+            ));
+            assert!(matches!(
+                gm.write_obj(val1, bad_addr2).unwrap_err(),
+                GuestMemoryError::PartialBuffer { expected, completed }
+                    if expected == size_of::<u64>()
+                        && completed == max_addr.checked_offset_from(bad_addr2).unwrap() as usize
+            ));

             gm.write_obj(val1, GuestAddress(0x500)).unwrap();
             gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
@@ -1061,63 +486,6 @@ mod tests {
         }
     }

-    #[test]
-    fn create_vec_with_regions() {
-        let region_size = 0x400;
-        let regions = vec![
-            (GuestAddress(0x0), region_size),
-            (GuestAddress(0x1000), region_size),
-        ];
-        let mut iterated_regions = Vec::new();
-        let gm = GuestMemoryMmap::from_ranges(&regions).unwrap();
-
-        for region in gm.iter() {
-            assert_eq!(region.len(), region_size as GuestUsize);
-        }
-
-        for region in gm.iter() {
-            iterated_regions.push((region.start_addr(), region.len() as usize));
-        }
-        assert_eq!(regions, iterated_regions);
-
-        assert!(regions
-            .iter()
-            .map(|x| (x.0, x.1))
-            .eq(iterated_regions.iter().copied()));
-
-        assert_eq!(gm.regions[0].guest_base, regions[0].0);
-        assert_eq!(gm.regions[1].guest_base, regions[1].0);
-    }
-
-    #[test]
-    fn test_memory() {
-        let region_size = 0x400;
-        let regions = vec![
-            (GuestAddress(0x0), region_size),
-            (GuestAddress(0x1000), region_size),
-        ];
-        let mut iterated_regions = Vec::new();
-        let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
-        let mem = gm.memory();
-
-        for region in mem.iter() {
-            assert_eq!(region.len(), region_size as GuestUsize);
-        }
-
-        for region in mem.iter() {
-            iterated_regions.push((region.start_addr(), region.len() as usize));
-        }
-        assert_eq!(regions, iterated_regions);
-
-        assert!(regions
-            .iter()
-            .map(|x| (x.0, x.1))
-            .eq(iterated_regions.iter().copied()));
-
-        assert_eq!(gm.regions[0].guest_base, regions[0].0);
-        assert_eq!(gm.regions[1].guest_base, regions[1].0);
-    }
-
     #[test]
     fn test_access_cross_boundary() {
         let f1 = TempFile::new().unwrap().into_file();
@@ -1197,62 +565,6 @@ mod tests {
         assert_eq!(region.file_offset().unwrap().start(), offset);
     }

-    #[test]
-    fn test_mmap_insert_region() {
-        let region_size = 0x1000;
-        let regions = vec![
-            (GuestAddress(0x0), region_size),
-            (GuestAddress(0x10_0000), region_size),
-        ];
-        let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
-        let mem_orig = gm.memory();
-        assert_eq!(mem_orig.num_regions(), 2);
-
-        let mmap =
-            Arc::new(GuestRegionMmap::from_range(GuestAddress(0x8000), 0x1000, None).unwrap());
-        let gm = gm.insert_region(mmap).unwrap();
-        let mmap =
-            Arc::new(GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap());
-        let gm = gm.insert_region(mmap).unwrap();
-        let mmap =
-            Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap());
-        let gm = gm.insert_region(mmap).unwrap();
-        let mmap =
-            Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap());
-        gm.insert_region(mmap).unwrap_err();
-
-        assert_eq!(mem_orig.num_regions(), 2);
-        assert_eq!(gm.num_regions(), 5);
-
-        assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
-        assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000));
-        assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000));
-        assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000));
-        assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000));
-    }
-
-    #[test]
-    fn test_mmap_remove_region() {
-        let region_size = 0x1000;
-        let regions = vec![
-            (GuestAddress(0x0), region_size),
-            (GuestAddress(0x10_0000), region_size),
-        ];
-        let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
-        let mem_orig = gm.memory();
-        assert_eq!(mem_orig.num_regions(), 2);
-
-        gm.remove_region(GuestAddress(0), 128).unwrap_err();
-        gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
-        let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();
-
-        assert_eq!(mem_orig.num_regions(), 2);
-        assert_eq!(gm.num_regions(), 1);
-
-        assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
-        assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
-    }
-
     #[test]
     fn test_guest_memory_mmap_get_slice() {
         let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap();
@@ -1323,63 +635,6 @@ mod tests {
         assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
    }

-    #[test]
-    fn test_checked_offset() {
-        let start_addr1 = GuestAddress(0);
-        let start_addr2 = GuestAddress(0x800);
-        let start_addr3 = GuestAddress(0xc00);
-        let guest_mem = GuestMemoryMmap::from_ranges(&[
-            (start_addr1, 0x400),
-            (start_addr2, 0x400),
-            (start_addr3, 0x400),
-        ])
-        .unwrap();
-
-        assert_eq!(
-            guest_mem.checked_offset(start_addr1, 0x200),
-            Some(GuestAddress(0x200))
-        );
-        assert_eq!(
-            guest_mem.checked_offset(start_addr1, 0xa00),
-            Some(GuestAddress(0xa00))
-        );
-        assert_eq!(
-            guest_mem.checked_offset(start_addr2, 0x7ff),
-            Some(GuestAddress(0xfff))
-        );
-        assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
-        assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);
-
-        assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
-        assert_eq!(
-            guest_mem.checked_offset(start_addr1, 0x400 - 1),
-            Some(GuestAddress(0x400 - 1))
-        );
-    }
-
-    #[test]
-    fn test_check_range() {
-        let start_addr1 = GuestAddress(0);
-        let start_addr2 = GuestAddress(0x800);
-        let start_addr3 = GuestAddress(0xc00);
-        let guest_mem = GuestMemoryMmap::from_ranges(&[
-            (start_addr1, 0x400),
-            (start_addr2, 0x400),
-            (start_addr3, 0x400),
-        ])
-        .unwrap();
-
-        assert!(guest_mem.check_range(start_addr1, 0x0));
-        assert!(guest_mem.check_range(start_addr1, 0x200));
-        assert!(guest_mem.check_range(start_addr1, 0x400));
-        assert!(!guest_mem.check_range(start_addr1, 0xa00));
-        assert!(guest_mem.check_range(start_addr2, 0x7ff));
-        assert!(guest_mem.check_range(start_addr2, 0x800));
-        assert!(!guest_mem.check_range(start_addr2, 0x801));
-        assert!(!guest_mem.check_range(start_addr2, 0xc00));
-        assert!(!guest_mem.check_range(start_addr1, usize::MAX));
-    }
-
     #[test]
     fn test_atomic_accesses() {
         let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap();

diff --git a/src/mmap/unix.rs b/src/mmap/unix.rs
index 752f05f1..58b031a3 100644
--- a/src/mmap/unix.rs
+++ b/src/mmap/unix.rs
@@ -558,7 +558,7 @@ mod tests {
             prot,
             flags,
         );
-        assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");
+        assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength));

         // Offset + size is greater than the size of the file (which is 0 at this point).
         let r = MmapRegion::build(
@@ -567,7 +567,7 @@ mod tests {
             prot,
             flags,
         );
-        assert_eq!(format!("{:?}", r.unwrap_err()), "MappingPastEof");
+        assert!(matches!(r.unwrap_err(), Error::MappingPastEof));

         // MAP_FIXED was specified among the flags.
         let r = MmapRegion::build(
@@ -576,7 +576,7 @@ mod tests {
             prot,
             flags | libc::MAP_FIXED,
         );
-        assert_eq!(format!("{:?}", r.unwrap_err()), "MapFixed");
+        assert!(matches!(r.unwrap_err(), Error::MapFixed));

         // Let's resize the file.
         assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0);
@@ -621,7 +621,7 @@ mod tests {
         let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE;

         let r = unsafe { MmapRegion::build_raw((addr + 1) as *mut u8, size, prot, flags) };
-        assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidPointer");
+        assert!(matches!(r.unwrap_err(), Error::InvalidPointer));

         let r = unsafe { MmapRegion::build_raw(addr as *mut u8, size, prot, flags).unwrap() };

diff --git a/src/mmap/xen.rs b/src/mmap/xen.rs
index 7c5c0670..590c34a1 100644
--- a/src/mmap/xen.rs
+++ b/src/mmap/xen.rs
@@ -1077,26 +1077,18 @@ mod tests {
         range.mmap_flags = 16;

         let r = MmapXen::new(&range);
-        assert_eq!(
-            format!("{:?}", r.unwrap_err()),
-            format!("MmapFlags({})", range.mmap_flags),
-        );
+        assert!(matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == range.mmap_flags));

         range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits();
         let r = MmapXen::new(&range);
-        assert_eq!(
-            format!("{:?}", r.unwrap_err()),
-            format!("MmapFlags({:x})", MmapXenFlags::ALL.bits()),
+        assert!(
+            matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::ALL.bits())
         );

         range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
         let r = MmapXen::new(&range);
-        assert_eq!(
-            format!("{:?}", r.unwrap_err()),
-            format!(
-                "MmapFlags({:x})",
-                MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits(),
-            ),
+        assert!(
+            matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits())
         );
     }

@@ -1132,17 +1124,17 @@ mod tests {
         range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0));
         range.prot = None;
         let r = MmapXenForeign::new(&range);
-        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+        assert!(matches!(r.unwrap_err(), Error::UnexpectedError));

         let mut range = MmapRange::initialized(true);
         range.flags = None;
         let r = MmapXenForeign::new(&range);
-        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+        assert!(matches!(r.unwrap_err(), Error::UnexpectedError));

         let mut range = MmapRange::initialized(true);
         range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
         let r = MmapXenForeign::new(&range);
-        assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");
+        assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength));

         let mut range = MmapRange::initialized(true);
         range.size = 0;
@@ -1164,7 +1156,7 @@ mod tests {
         let mut range = MmapRange::initialized(true);
         range.prot = None;
         let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
-        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+        assert!(matches!(r.unwrap_err(), Error::UnexpectedError));

         let mut range = MmapRange::initialized(true);
         range.prot = None;
@@ -1174,12 +1166,12 @@ mod tests {
         let mut range = MmapRange::initialized(true);
         range.flags = None;
         let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
-        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");
+        assert!(matches!(r.unwrap_err(), Error::UnexpectedError));

         let mut range = MmapRange::initialized(true);
         range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
         let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
-        assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");
+        assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength));

         let mut range = MmapRange::initialized(true);
         range.size = 0;
diff --git a/src/region.rs b/src/region.rs
new file mode 100644
index 00000000..a6395d37
--- /dev/null
+++ b/src/region.rs
@@ -0,0 +1,778 @@
+//! Module containing abstractions for dealing with contiguous regions of guest memory
+
+use crate::bitmap::{Bitmap, BS};
+use crate::guest_memory::Result;
+use crate::{
+    Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError,
+    GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile,
+};
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+/// Represents a contiguous region of guest physical memory.
+#[allow(clippy::len_without_is_empty)]
+pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = GuestMemoryError> {
+    /// Type used for dirty memory tracking.
+    type B: Bitmap;
+
+    /// Returns the size of the region.
+    fn len(&self) -> GuestUsize;
+
+    /// Returns the minimum (inclusive) address managed by the region.
+    fn start_addr(&self) -> GuestAddress;
+
+    /// Returns the maximum (inclusive) address managed by the region.
+    fn last_addr(&self) -> GuestAddress {
+        // unchecked_add is safe as the region bounds were checked when it was created.
+        self.start_addr().unchecked_add(self.len() - 1)
+    }
+
+    /// Borrow the associated `Bitmap` object.
+    fn bitmap(&self) -> &Self::B;
+
+    /// Returns the given address if it is within this region.
+    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
+        if self.address_in_range(addr) {
+            Some(addr)
+        } else {
+            None
+        }
+    }
+
+    /// Returns `true` if the given address is within this region.
+    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
+        addr.raw_value() < self.len()
+    }
+
+    /// Returns the address plus the offset if it is in this region.
+    fn checked_offset(
+        &self,
+        base: MemoryRegionAddress,
+        offset: usize,
+    ) -> Option<MemoryRegionAddress> {
+        base.checked_add(offset as u64)
+            .and_then(|addr| self.check_address(addr))
+    }
+
+    /// Tries to convert an absolute address to a relative address within this region.
+    ///
+    /// Returns `None` if `addr` is out of the bounds of this region.
+    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
+        addr.checked_offset_from(self.start_addr())
+            .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
+    }
+
+    /// Returns the host virtual address corresponding to the region address.
+    ///
+    /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
+    /// have the capability to mmap guest address range into host virtual address space for
+    /// direct access, so the corresponding host virtual address may be passed to other subsystems.
+    ///
+    /// # Note
+    /// The underlying guest memory is not protected from memory aliasing, which breaks the
+    /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
+    /// concurrent accesses to the underlying guest memory.
+    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
+        Err(GuestMemoryError::HostAddressNotAvailable)
+    }
+
+    /// Returns information regarding the file and offset backing this memory region.
+    fn file_offset(&self) -> Option<&FileOffset> {
+        None
+    }
+
+    /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
+    /// `offset`.
+    #[allow(unused_variables)]
+    fn get_slice(
+        &self,
+        offset: MemoryRegionAddress,
+        count: usize,
+    ) -> Result<VolatileSlice<BS<Self::B>>> {
+        Err(GuestMemoryError::HostAddressNotAvailable)
+    }
+
+    /// Gets a slice of memory for the entire region that supports volatile access.
+    ///
+    /// # Examples (uses the `backend-mmap` feature)
+    ///
+    /// ```
+    /// # #[cfg(feature = "backend-mmap")]
+    /// # {
+    /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
+    /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
+    /// #
+    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
+    ///     .expect("Could not create guest memory");
+    /// let slice = region
+    ///     .as_volatile_slice()
+    ///     .expect("Could not get volatile slice");
+    ///
+    /// let v = 42u32;
+    /// let r = slice
+    ///     .get_ref::<u32>(0x200)
+    ///     .expect("Could not get reference");
+    /// r.store(v);
+    /// assert_eq!(r.load(), v);
+    /// # }
+    /// ```
+    fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
+        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
+    }
+
+    /// Shows whether the region is based on `HugeTLBFS`.
+    /// Returns `Some(true)` if the region is backed by hugetlbfs, and `None`
+    /// if no information is available.
+    ///
+    /// # Examples (uses the `backend-mmap` feature)
+    ///
+    /// ```
+    /// # #[cfg(feature = "backend-mmap")]
+    /// # {
+    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
+    /// let addr = GuestAddress(0x1000);
+    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
+    /// let r = mem.find_region(addr).unwrap();
+    /// assert_eq!(r.is_hugetlbfs(), None);
+    /// # }
+    /// ```
+    #[cfg(target_os = "linux")]
+    fn is_hugetlbfs(&self) -> Option<bool> {
+        None
+    }
+}
+
+/// Errors that can occur when dealing with [`GuestMemoryRegion`]s, or collections thereof.
+#[derive(Debug, thiserror::Error)]
+pub enum GuestRegionError {
+    /// Adding the guest base address to the length of the underlying mapping resulted
+    /// in an overflow.
+    #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")]
+    #[cfg(feature = "backend-mmap")]
+    InvalidGuestRegion,
+    /// Error creating a `MmapRegion` object.
+    #[error("{0}")]
+    #[cfg(feature = "backend-mmap")]
+    MmapRegion(crate::mmap::MmapRegionError),
+    /// No memory region found.
+    #[error("No memory region found")]
+    NoMemoryRegion,
+    /// Some of the memory regions intersect with each other.
+    #[error("Some of the memory regions intersect with each other")]
+    MemoryRegionOverlap,
+    /// The provided memory regions haven't been sorted.
+    #[error("The provided memory regions haven't been sorted")]
+    UnsortedMemoryRegions,
+}
+
+/// [`GuestMemory`](trait.GuestMemory.html) implementation based on a homogeneous collection
+/// of [`GuestMemoryRegion`] implementations.
+///
+/// Represents a sorted set of non-overlapping physical guest memory regions.
+#[derive(Debug)]
+pub struct GuestRegionCollection<R> {
+    regions: Vec<Arc<R>>,
+}
+
+impl<R> Default for GuestRegionCollection<R> {
+    fn default() -> Self {
+        Self {
+            regions: Vec::new(),
+        }
+    }
+}
+
+impl<R> Clone for GuestRegionCollection<R> {
+    fn clone(&self) -> Self {
+        GuestRegionCollection {
+            regions: self.regions.iter().map(Arc::clone).collect(),
+        }
+    }
+}
+
+impl<R: GuestMemoryRegion> GuestRegionCollection<R> {
+    /// Creates an empty `GuestRegionCollection` instance.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Creates a new [`GuestRegionCollection`] from a vector of regions.
+    ///
+    /// # Arguments
+    ///
+    /// * `regions` - The vector of regions.
+    ///   The regions shouldn't overlap, and they should be sorted
+    ///   by the starting address.
+    pub fn from_regions(mut regions: Vec<R>) -> std::result::Result<Self, GuestRegionError> {
+        Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
+    }
+
+    /// Creates a new [`GuestRegionCollection`] from a vector of Arc regions.
+    ///
+    /// Similar to the constructor `from_regions()` as it returns a
+    /// [`GuestRegionCollection`]. The need for this constructor is to provide a way for
+    /// consumers of this API to create a new [`GuestRegionCollection`] based on existing
+    /// regions coming from an existing [`GuestRegionCollection`] instance.
+    ///
+    /// # Arguments
+    ///
+    /// * `regions` - The vector of `Arc` regions.
+    ///   The regions shouldn't overlap and they should be sorted
+    ///   by the starting address.
+    pub fn from_arc_regions(regions: Vec<Arc<R>>) -> std::result::Result<Self, GuestRegionError> {
+        if regions.is_empty() {
+            return Err(GuestRegionError::NoMemoryRegion);
+        }
+
+        for window in regions.windows(2) {
+            let prev = &window[0];
+            let next = &window[1];
+
+            if prev.start_addr() > next.start_addr() {
+                return Err(GuestRegionError::UnsortedMemoryRegions);
+            }
+
+            if prev.last_addr() >= next.start_addr() {
+                return Err(GuestRegionError::MemoryRegionOverlap);
+            }
+        }
+
+        Ok(Self { regions })
+    }
+
+    /// Insert a region into the `GuestRegionCollection` object and return a new `GuestRegionCollection`.
+    ///
+    /// # Arguments
+    /// * `region`: the memory region to insert into the guest memory object.
+    pub fn insert_region(
+        &self,
+        region: Arc<R>,
+    ) -> std::result::Result<GuestRegionCollection<R>, GuestRegionError> {
+        let mut regions = self.regions.clone();
+        regions.push(region);
+        regions.sort_by_key(|x| x.start_addr());
+
+        Self::from_arc_regions(regions)
+    }
+
+    /// Remove a region from the [`GuestRegionCollection`] object and return a new `GuestRegionCollection`
+    /// on success, together with the removed region.
+    ///
+    /// # Arguments
+    /// * `base`: base address of the region to be removed
+    /// * `size`: size of the region to be removed
+    pub fn remove_region(
+        &self,
+        base: GuestAddress,
+        size: GuestUsize,
+    ) -> std::result::Result<(GuestRegionCollection<R>, Arc<R>), GuestRegionError> {
+        if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
+            if self.regions.get(region_index).unwrap().len() == size {
+                let mut regions = self.regions.clone();
+                let region = regions.remove(region_index);
+                return Ok((Self { regions }, region));
+            }
+        }
+
+        Err(GuestRegionError::NoMemoryRegion)
+    }
+}
+
+impl<R: GuestMemoryRegion> GuestMemory for GuestRegionCollection<R> {
+    type R = R;
+
+    fn num_regions(&self) -> usize {
+        self.regions.len()
+    }
+
+    fn find_region(&self, addr: GuestAddress) -> Option<&R> {
+        let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
+            Ok(x) => Some(x),
+            // Within the closest region with starting address < addr
+            Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
+            _ => None,
+        };
+        index.map(|x| self.regions[x].as_ref())
+    }
+
+    fn iter(&self) -> impl Iterator<Item = &Self::R> {
+        self.regions.iter().map(AsRef::as_ref)
+    }
+}
+
+/// A marker trait that, if implemented on a type `R`, makes available a default
+/// implementation of `Bytes<MemoryRegionAddress>` for `R`, based on the assumption
+/// that the entire `GuestMemoryRegion` is just traditional memory without any
+/// special access requirements.
+pub trait GuestMemoryRegionBytes: GuestMemoryRegion {}
+
+impl<R: GuestMemoryRegionBytes> Bytes<MemoryRegionAddress> for R {
+    type E = GuestMemoryError;
+
+    /// # Examples
+    /// * Write a slice at guest address 0x1200.
+    ///
+    /// ```
+    /// # #[cfg(feature = "backend-mmap")]
+    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+    /// #
+    /// # #[cfg(feature = "backend-mmap")]
+    /// # {
+    /// # let start_addr = GuestAddress(0x1000);
+    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+    /// #     .expect("Could not create guest memory");
+    /// #
+    /// let res = gm
+    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
+    ///     .expect("Could not write to guest memory");
+    /// assert_eq!(5, res);
+    /// # }
+    /// ```
+    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
+        let maddr = addr.raw_value() as usize;
+        self.as_volatile_slice()?
+            .write(buf, maddr)
+            .map_err(Into::into)
+    }
+
+    /// # Examples
+    /// * Read a slice of length 16 at guest address 0x1200.
+    ///
+    /// ```
+    /// # #[cfg(feature = "backend-mmap")]
+    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
+    /// #
+    /// # #[cfg(feature = "backend-mmap")]
+    /// # {
+    /// # let start_addr = GuestAddress(0x1000);
+    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+    /// #     .expect("Could not create guest memory");
+    /// #
+    /// let buf = &mut [0u8; 16];
+    /// let res = gm
+    ///     .read(buf, GuestAddress(0x1200))
+    ///     .expect("Could not read from guest memory");
+    /// assert_eq!(16, res);
+    /// # }
+    /// ```
+    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
+        let maddr = addr.raw_value() as usize;
+        self.as_volatile_slice()?
+            .read(buf, maddr)
+            .map_err(Into::into)
+    }
+
+    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
+        let maddr = addr.raw_value() as usize;
+        self.as_volatile_slice()?
+            .write_slice(buf, maddr)
+            .map_err(Into::into)
+    }
+
+    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
+        let maddr = addr.raw_value() as usize;
+        self.as_volatile_slice()?
+            .read_slice(buf, maddr)
+            .map_err(Into::into)
+    }
+
+    fn read_volatile_from<F>(
+        &self,
+        addr: MemoryRegionAddress,
+        src: &mut F,
+        count: usize,
+    ) -> Result<usize>
+    where
+        F: ReadVolatile,
+    {
+        self.as_volatile_slice()?
+            .read_volatile_from(addr.0 as usize, src, count)
+            .map_err(Into::into)
+    }
+
+    fn read_exact_volatile_from<F>(
+        &self,
+        addr: MemoryRegionAddress,
+        src: &mut F,
+        count: usize,
+    ) -> Result<()>
+    where
+        F: ReadVolatile,
+    {
+        self.as_volatile_slice()?
+            .read_exact_volatile_from(addr.0 as usize, src, count)
+            .map_err(Into::into)
+    }
+
+    fn write_volatile_to<F>(
+        &self,
+        addr: MemoryRegionAddress,
+        dst: &mut F,
+        count: usize,
+    ) -> Result<usize>
+    where
+        F: WriteVolatile,
+    {
+        self.as_volatile_slice()?
+            .write_volatile_to(addr.0 as usize, dst, count)
+            .map_err(Into::into)
+    }
+
+    fn write_all_volatile_to<F>(
+        &self,
+        addr: MemoryRegionAddress,
+        dst: &mut F,
+        count: usize,
+    ) -> Result<()>
+    where
+        F: WriteVolatile,
+    {
+        self.as_volatile_slice()?
+            .write_all_volatile_to(addr.0 as usize, dst, count)
+            .map_err(Into::into)
+    }
+
+    fn store<T: AtomicAccess>(
+        &self,
+        val: T,
+        addr: MemoryRegionAddress,
+        order: Ordering,
+    ) -> Result<()> {
+        self.as_volatile_slice().and_then(|s| {
+            s.store(val, addr.raw_value() as usize, order)
+                .map_err(Into::into)
+        })
+    }
+
+    fn load<T: AtomicAccess>(&self, addr: MemoryRegionAddress, order: Ordering) -> Result<T> {
+        self.as_volatile_slice()
+            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use crate::region::{GuestMemoryRegionBytes, GuestRegionError};
+    use crate::{
+        Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize,
+    };
+    use std::sync::Arc;
+
+    #[derive(Debug, PartialEq, Eq)]
+    pub(crate) struct MockRegion {
+        pub(crate) start: GuestAddress,
+        pub(crate) len: GuestUsize,
+    }
+
+    impl GuestMemoryRegion for MockRegion {
+        type B = ();
+
+        fn len(&self) -> GuestUsize {
+            self.len
+        }
+
+        fn start_addr(&self) -> GuestAddress {
+            self.start
+        }
+
+        fn bitmap(&self) -> &Self::B {
+            &()
+        }
+    }
+
+    impl GuestMemoryRegionBytes for MockRegion {}
+
+    pub(crate) type Collection = GuestRegionCollection<MockRegion>;
+
+    fn check_guest_memory_mmap(
+        maybe_guest_mem: Result<Collection, GuestRegionError>,
+        expected_regions_summary: &[(GuestAddress, u64)],
+    ) {
+        assert!(maybe_guest_mem.is_ok());
+
+        let guest_mem = maybe_guest_mem.unwrap();
+        assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
+        let maybe_last_mem_reg = expected_regions_summary.last();
+        if let Some((region_addr, region_size)) = maybe_last_mem_reg {
+            let mut last_addr = region_addr.unchecked_add(*region_size);
+            if last_addr.raw_value() != 0 {
+                last_addr = last_addr.unchecked_sub(1);
+            }
+            assert_eq!(guest_mem.last_addr(), last_addr);
+        }
+        for ((region_addr, region_size), mmap) in
+            expected_regions_summary.iter().zip(guest_mem.iter())
+        {
+            assert_eq!(region_addr, &mmap.start);
+            assert_eq!(region_size, &mmap.len);
+
+            assert!(guest_mem.find_region(*region_addr).is_some());
+        }
+    }
+
+    pub(crate) fn new_guest_memory_collection_from_regions(
+        regions_summary: &[(GuestAddress, u64)],
+    ) -> Result<Collection, GuestRegionError> {
+        Collection::from_regions(
+            regions_summary
+                .iter()
+                .map(|&(start, len)| MockRegion { start, len })
+                .collect(),
+        )
+    }
+
+    fn new_guest_memory_collection_from_arc_regions(
+        regions_summary: &[(GuestAddress, u64)],
+    ) -> Result<Collection, GuestRegionError> {
+        Collection::from_arc_regions(
+            regions_summary
+                .iter()
+                .map(|&(start, len)| Arc::new(MockRegion { start, len }))
+                .collect(),
+        )
+    }
+
+    #[test]
+    fn test_no_memory_region() {
+        let regions_summary = [];
+
+        assert!(matches!(
+            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
+            GuestRegionError::NoMemoryRegion
+        ));
+        assert!(matches!(
+            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
+            GuestRegionError::NoMemoryRegion
+        ));
+    }
+
+    #[test]
+    fn test_overlapping_memory_regions() {
+        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)];
+
+        assert!(matches!(
+            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
+            GuestRegionError::MemoryRegionOverlap
+        ));
+        assert!(matches!(
+            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
+            GuestRegionError::MemoryRegionOverlap
+        ));
+    }
+
+    #[test]
+    fn test_unsorted_memory_regions() {
+        let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)];
+
+        assert!(matches!(
+            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
+            GuestRegionError::UnsortedMemoryRegions
+ )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), + GuestRegionError::UnsortedMemoryRegions + )); + } + + #[test] + fn test_valid_memory_regions() { + let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)]; + + let guest_mem = Collection::new(); + assert_eq!(guest_mem.num_regions(), 0); + + check_guest_memory_mmap( + new_guest_memory_collection_from_regions(®ions_summary), + ®ions_summary, + ); + + check_guest_memory_mmap( + new_guest_memory_collection_from_arc_regions(®ions_summary), + ®ions_summary, + ); + } + + #[test] + fn test_mmap_insert_region() { + let region_size = 0x1000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10_0000), region_size), + ]; + let mem_orig = new_guest_memory_collection_from_regions(®ions).unwrap(); + let mut gm = mem_orig.clone(); + assert_eq!(mem_orig.num_regions(), 2); + + let new_regions = [ + (GuestAddress(0x8000), 0x1000), + (GuestAddress(0x4000), 0x1000), + (GuestAddress(0xc000), 0x1000), + ]; + + for (start, len) in new_regions { + gm = gm + .insert_region(Arc::new(MockRegion { start, len })) + .unwrap(); + } + + gm.insert_region(Arc::new(MockRegion { + start: GuestAddress(0xc000), + len: 0x1000, + })) + .unwrap_err(); + + assert_eq!(mem_orig.num_regions(), 2); + assert_eq!(gm.num_regions(), 5); + + let regions = gm.iter().collect::>(); + + assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); + assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); + assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); + assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); + } + + #[test] + fn test_mmap_remove_region() { + let region_size = 0x1000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10_0000), region_size), + ]; + let mem_orig = new_guest_memory_collection_from_regions(®ions).unwrap(); + let gm = mem_orig.clone(); + assert_eq!(mem_orig.num_regions(), 2); + + gm.remove_region(GuestAddress(0), 128).unwrap_err(); + gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); + let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); + + assert_eq!(mem_orig.num_regions(), 2); + assert_eq!(gm.num_regions(), 1); + + assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); + assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); + } + + #[test] + fn test_iter() { + let region_size = 0x400; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x1000), region_size), + ]; + let mut iterated_regions = Vec::new(); + let gm = new_guest_memory_collection_from_regions(®ions).unwrap(); + + for region in gm.iter() { + assert_eq!(region.len(), region_size as GuestUsize); + } + + for region in gm.iter() { + iterated_regions.push((region.start_addr(), region.len())); + } + assert_eq!(regions, iterated_regions); + + assert!(regions + .iter() + .map(|x| (x.0, x.1)) + .eq(iterated_regions.iter().copied())); + + let mmap_regions = gm.iter().collect::>(); + + assert_eq!(mmap_regions[0].start, regions[0].0); + assert_eq!(mmap_regions[1].start, regions[1].0); + } + + #[test] + fn test_address_in_range() { + let start_addr1 = GuestAddress(0x0); + let start_addr2 = GuestAddress(0x800); + let guest_mem = + new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)]) + .unwrap(); + + assert!(guest_mem.address_in_range(GuestAddress(0x200))); + 
diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs
index 43c1d206..acff239b 100644
--- a/src/volatile_memory.rs
+++ b/src/volatile_memory.rs
@@ -1488,58 +1488,6 @@ mod tests {
         slice.compute_end_offset(6, 0).unwrap_err();
     }

-    #[test]
-    fn test_display_error() {
-        assert_eq!(
-            format!("{}", Error::OutOfBounds { addr: 0x10 }),
-            "address 0x10 is out of bounds"
-        );
-
-        assert_eq!(
-            format!(
-                "{}",
-                Error::Overflow {
-                    base: 0x0,
-                    offset: 0x10
-                }
-            ),
-            "address 0x0 offset by 0x10 would overflow"
-        );
-
-        assert_eq!(
-            format!(
-                "{}",
-                Error::TooBig {
-                    nelements: 100_000,
-                    size: 1_000_000_000
-                }
-            ),
-            "100000 elements of size 1000000000 would overflow a usize"
-        );
-
-        assert_eq!(
-            format!(
-                "{}",
-                Error::Misaligned {
-                    addr: 0x4,
-                    alignment: 8
-                }
-            ),
-            "address 0x4 is not aligned to 8"
-        );
-
-        assert_eq!(
-            format!(
-                "{}",
-                Error::PartialBuffer {
-                    expected: 100,
-                    completed: 90
-                }
-            ),
-            "only used 90 bytes in 100 long buffer"
-        );
-    }
-
     #[test]
     fn misaligned_ref() {
         let mut a = [0u8; 3];