From 1f595187595c78ad083a6ed776571e2e456d552c Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Wed, 5 Feb 2025 07:48:37 +0000 Subject: [PATCH 1/7] Add naive default impl for `GuestMemory::find_region()` This function can be default-implemented in terms of `GuestMemory::iter()`. Downstream impls can overwrite this with more specialized and efficient versions of course (such as GuestMemoryMmap using a binary search). Signed-off-by: Patrick Roy --- CHANGELOG.md | 2 ++ src/guest_memory.rs | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1881e7bb..ea6caa5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ - \[[#307](https://github.com/rust-vmm/vm-memory/pull/304)\] Move `read_volatile_from`, `read_exact_volatile_from`, `write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait. +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Give `GuestMemory::find_region` a default implementation, + based on linear search. ### Removed diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 189ea8dd..9ece9ae0 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -408,7 +408,9 @@ pub trait GuestMemory { fn num_regions(&self) -> usize; /// Returns the region containing the specified address or `None`. - fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>; + fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { + self.iter().find(|region| addr >= region.start_addr() && addr <= region.last_addr()) + } /// Gets an iterator over the entries in the collection. /// From 0d7765be9a1bd7f09e7d037a253e0aa5f93a6887 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 12:13:44 +0000 Subject: [PATCH 2/7] introduce `region` module This module is intended for all functionality that relates to contiguous regions of guest memory.
This differentiates it from `guest_memory`, as that is about a holistic view of guest memory, and from `mmap`, which is specifically about guest memory regions backed by mmap VMAs. Signed-off-by: Patrick Roy --- src/guest_memory.rs | 139 ++----------------------------------------- src/lib.rs | 5 +- src/mmap/mod.rs | 3 +- src/region.rs | 141 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 151 insertions(+), 137 deletions(-) create mode 100644 src/region.rs diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 9ece9ae0..3d018e04 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -50,10 +50,11 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use crate::address::{Address, AddressValue}; -use crate::bitmap::{Bitmap, BS, MS}; +use crate::bitmap::MS; use crate::bytes::{AtomicAccess, Bytes}; use crate::io::{ReadVolatile, WriteVolatile}; use crate::volatile_memory::{self, VolatileSlice}; +use crate::GuestMemoryRegion; /// Errors associated with handling guest memory accesses. #[allow(missing_docs)] @@ -158,139 +159,6 @@ impl FileOffset { } } -/// Represents a continuous region of guest physical memory. -#[allow(clippy::len_without_is_empty)] -pub trait GuestMemoryRegion: Bytes { - /// Type used for dirty memory tracking. - type B: Bitmap; - - /// Returns the size of the region. - fn len(&self) -> GuestUsize; - - /// Returns the minimum (inclusive) address managed by the region. - fn start_addr(&self) -> GuestAddress; - - /// Returns the maximum (inclusive) address managed by the region. - fn last_addr(&self) -> GuestAddress { - // unchecked_add is safe as the region bounds were checked when it was created. - self.start_addr().unchecked_add(self.len() - 1) - } - - /// Borrow the associated `Bitmap` object. - fn bitmap(&self) -> &Self::B; - - /// Returns the given address if it is within this region. 
- fn check_address(&self, addr: MemoryRegionAddress) -> Option { - if self.address_in_range(addr) { - Some(addr) - } else { - None - } - } - - /// Returns `true` if the given address is within this region. - fn address_in_range(&self, addr: MemoryRegionAddress) -> bool { - addr.raw_value() < self.len() - } - - /// Returns the address plus the offset if it is in this region. - fn checked_offset( - &self, - base: MemoryRegionAddress, - offset: usize, - ) -> Option { - base.checked_add(offset as u64) - .and_then(|addr| self.check_address(addr)) - } - - /// Tries to convert an absolute address to a relative address within this region. - /// - /// Returns `None` if `addr` is out of the bounds of this region. - fn to_region_addr(&self, addr: GuestAddress) -> Option { - addr.checked_offset_from(self.start_addr()) - .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) - } - - /// Returns the host virtual address corresponding to the region address. - /// - /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, - /// have the capability to mmap guest address range into host virtual address space for - /// direct access, so the corresponding host virtual address may be passed to other subsystems. - /// - /// # Note - /// The underlying guest memory is not protected from memory aliasing, which breaks the - /// Rust memory safety model. It's the caller's responsibility to ensure that there's no - /// concurrent accesses to the underlying guest memory. - fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { - Err(Error::HostAddressNotAvailable) - } - - /// Returns information regarding the file and offset backing this memory region. - fn file_offset(&self) -> Option<&FileOffset> { - None - } - - /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at - /// `offset`. 
- #[allow(unused_variables)] - fn get_slice( - &self, - offset: MemoryRegionAddress, - count: usize, - ) -> Result>> { - Err(Error::HostAddressNotAvailable) - } - - /// Gets a slice of memory for the entire region that supports volatile access. - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; - /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; - /// # - /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None) - /// .expect("Could not create guest memory"); - /// let slice = region - /// .as_volatile_slice() - /// .expect("Could not get volatile slice"); - /// - /// let v = 42u32; - /// let r = slice - /// .get_ref::(0x200) - /// .expect("Could not get reference"); - /// r.store(v); - /// assert_eq!(r.load(), v); - /// # } - /// ``` - fn as_volatile_slice(&self) -> Result>> { - self.get_slice(MemoryRegionAddress(0), self.len() as usize) - } - - /// Show if the region is based on the `HugeTLBFS`. - /// Returns Some(true) if the region is backed by hugetlbfs. - /// None represents that no information is available. - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; - /// let addr = GuestAddress(0x1000); - /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap(); - /// let r = mem.find_region(addr).unwrap(); - /// assert_eq!(r.is_hugetlbfs(), None); - /// # } - /// ``` - #[cfg(target_os = "linux")] - fn is_hugetlbfs(&self) -> Option { - None - } -} - /// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object. 
/// The vm-memory crate already provides trivial implementation for /// references to `GuestMemory` or reference-counted `GuestMemory` objects, @@ -409,7 +277,8 @@ pub trait GuestMemory { /// Returns the region containing the specified address or `None`. fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { - self.iter().find(|region| addr >= region.start_addr() && addr <= region.last_addr()) + self.iter() + .find(|region| addr >= region.start_addr() && addr <= region.last_addr()) } /// Gets an iterator over the entries in the collection. diff --git a/src/lib.rs b/src/lib.rs index b8fe5f40..886602a3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -47,9 +47,12 @@ pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize}; pub mod guest_memory; pub use guest_memory::{ Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory, - GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult, + GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult, }; +pub mod region; +pub use region::GuestMemoryRegion; + pub mod io; pub use io::{ReadVolatile, WriteVolatile}; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index 9f9f1939..a68ae2b5 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -23,8 +23,9 @@ use std::sync::Arc; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{ - self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, + self, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, }; +use crate::region::GuestMemoryRegion; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; use crate::{AtomicAccess, Bytes, ReadVolatile, WriteVolatile}; diff --git a/src/region.rs b/src/region.rs new file mode 100644 index 00000000..c20dbea0 --- /dev/null +++ b/src/region.rs @@ -0,0 +1,141 @@ +//! 
Module containing abstracts for dealing with contiguous regions of guest memory + +use crate::bitmap::{Bitmap, BS}; +use crate::guest_memory::Error; +use crate::guest_memory::Result; +use crate::{ + Address, Bytes, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress, VolatileSlice, +}; + +/// Represents a continuous region of guest physical memory. +#[allow(clippy::len_without_is_empty)] +pub trait GuestMemoryRegion: Bytes { + /// Type used for dirty memory tracking. + type B: Bitmap; + + /// Returns the size of the region. + fn len(&self) -> GuestUsize; + + /// Returns the minimum (inclusive) address managed by the region. + fn start_addr(&self) -> GuestAddress; + + /// Returns the maximum (inclusive) address managed by the region. + fn last_addr(&self) -> GuestAddress { + // unchecked_add is safe as the region bounds were checked when it was created. + self.start_addr().unchecked_add(self.len() - 1) + } + + /// Borrow the associated `Bitmap` object. + fn bitmap(&self) -> &Self::B; + + /// Returns the given address if it is within this region. + fn check_address(&self, addr: MemoryRegionAddress) -> Option { + if self.address_in_range(addr) { + Some(addr) + } else { + None + } + } + + /// Returns `true` if the given address is within this region. + fn address_in_range(&self, addr: MemoryRegionAddress) -> bool { + addr.raw_value() < self.len() + } + + /// Returns the address plus the offset if it is in this region. + fn checked_offset( + &self, + base: MemoryRegionAddress, + offset: usize, + ) -> Option { + base.checked_add(offset as u64) + .and_then(|addr| self.check_address(addr)) + } + + /// Tries to convert an absolute address to a relative address within this region. + /// + /// Returns `None` if `addr` is out of the bounds of this region. 
+ fn to_region_addr(&self, addr: GuestAddress) -> Option { + addr.checked_offset_from(self.start_addr()) + .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) + } + + /// Returns the host virtual address corresponding to the region address. + /// + /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, + /// have the capability to mmap guest address range into host virtual address space for + /// direct access, so the corresponding host virtual address may be passed to other subsystems. + /// + /// # Note + /// The underlying guest memory is not protected from memory aliasing, which breaks the + /// Rust memory safety model. It's the caller's responsibility to ensure that there's no + /// concurrent accesses to the underlying guest memory. + fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { + Err(Error::HostAddressNotAvailable) + } + + /// Returns information regarding the file and offset backing this memory region. + fn file_offset(&self) -> Option<&FileOffset> { + None + } + + /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at + /// `offset`. + #[allow(unused_variables)] + fn get_slice( + &self, + offset: MemoryRegionAddress, + count: usize, + ) -> Result>> { + Err(Error::HostAddressNotAvailable) + } + + /// Gets a slice of memory for the entire region that supports volatile access. 
+ /// + /// # Examples (uses the `backend-mmap` feature) + /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; + /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; + /// # + /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None) + /// .expect("Could not create guest memory"); + /// let slice = region + /// .as_volatile_slice() + /// .expect("Could not get volatile slice"); + /// + /// let v = 42u32; + /// let r = slice + /// .get_ref::(0x200) + /// .expect("Could not get reference"); + /// r.store(v); + /// assert_eq!(r.load(), v); + /// # } + /// ``` + fn as_volatile_slice(&self) -> Result>> { + self.get_slice(MemoryRegionAddress(0), self.len() as usize) + } + + /// Show if the region is based on the `HugeTLBFS`. + /// Returns Some(true) if the region is backed by hugetlbfs. + /// None represents that no information is available. + /// + /// # Examples (uses the `backend-mmap` feature) + /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; + /// let addr = GuestAddress(0x1000); + /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap(); + /// let r = mem.find_region(addr).unwrap(); + /// assert_eq!(r.is_hugetlbfs(), None); + /// # } + /// ``` + #[cfg(target_os = "linux")] + fn is_hugetlbfs(&self) -> Option { + None + } +} From f1718b8b2341795ef083feb54f5ca0a346a7c713 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Wed, 5 Feb 2025 11:29:53 +0000 Subject: [PATCH 3/7] Generalize GuestMemoryMmap to arbitrary GuestMemoryRegions Add the concept of a `GuestRegionCollection`, which just manages a list of some `GuestMemoryRegion` impls. Functionality wise, it offers the same implementations as `GuestMemoryMmap`. 
As a result, turn `GuestMemoryMmap` into a type alias for `GuestRegionCollection` with a fixed `R = GuestRegionMmap`. The error type handling is a bit wack, but this is needed to preserve backwards compatibility: The error type of GuestRegionCollection must match what GuestMemoryMmap historically returned, so the error type needs to be lifted from mmap.rs - however, it contains specific variants that are only relevant to GuestMemoryMmap, so cfg those behind the `backend-mmap` feature flag (as to why this specific error type gets the privilege of just being reexported as `Error` from this crate: No idea, but it's pre-existing, and changing it would be a breaking change). Signed-off-by: Patrick Roy --- CHANGELOG.md | 2 + src/lib.rs | 4 +- src/mmap/mod.rs | 181 +++++++----------------------------------------- src/region.rs | 162 ++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 190 insertions(+), 159 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea6caa5f..ff662a2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ ### Added - \[[#311](https://github.com/rust-vmm/vm-memory/pull/311)\] Allow compiling without the ReadVolatile and WriteVolatile implementations +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] `GuestRegionCollection`, a generic container of `GuestMemoryRegion`s, generalizing `GuestMemoryMmap` (which + is now a type alias for `GuestRegionCollection`) ### Changed diff --git a/src/lib.rs b/src/lib.rs index 886602a3..d89f8459 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,7 +51,7 @@ pub use guest_memory::{ }; pub mod region; -pub use region::GuestMemoryRegion; +pub use region::{GuestMemoryRegion, GuestRegionCollection, GuestRegionError as Error}; pub mod io; pub use io::{ReadVolatile, WriteVolatile}; @@ -60,7 +60,7 @@ pub use io::{ReadVolatile, WriteVolatile}; pub mod mmap; #[cfg(feature = "backend-mmap")] -pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion}; +pub use mmap::{GuestMemoryMmap,
GuestRegionMmap, MmapRegion}; #[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))] pub use mmap::{MmapRange, MmapXenFlags}; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index a68ae2b5..07af5916 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -18,16 +18,13 @@ use std::io::{Seek, SeekFrom}; use std::ops::Deref; use std::result; use std::sync::atomic::Ordering; -use std::sync::Arc; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::{ - self, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, -}; +use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; use crate::region::GuestMemoryRegion; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; -use crate::{AtomicAccess, Bytes, ReadVolatile, WriteVolatile}; +use crate::{AtomicAccess, Bytes, Error, GuestRegionCollection, ReadVolatile, WriteVolatile}; // re-export for backward compat, as the trait used to be defined in mmap.rs pub use crate::bitmap::NewBitmap; @@ -52,27 +49,6 @@ pub use std::io::Error as MmapRegionError; #[cfg(target_family = "windows")] pub use windows::MmapRegion; -/// Errors that can occur when creating a memory map. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// Adding the guest base address to the length of the underlying mapping resulted - /// in an overflow. - #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")] - InvalidGuestRegion, - /// Error creating a `MmapRegion` object. - #[error("{0}")] - MmapRegion(MmapRegionError), - /// No memory region found. - #[error("No memory region found")] - NoMemoryRegion, - /// Some of the memory regions intersect with each other. - #[error("Some of the memory regions intersect with each other")] - MemoryRegionOverlap, - /// The provided memory regions haven't been sorted. 
- #[error("The provided memory regions haven't been sorted")] - UnsortedMemoryRegions, -} - // TODO: use this for Windows as well after we redefine the Error type there. #[cfg(target_family = "unix")] /// Checks if a mapping of `size` bytes fits at the provided `file_offset`. @@ -369,17 +345,9 @@ impl GuestMemoryRegion for GuestRegionMmap { /// Represents the entire physical memory of the guest by tracking all its memory regions. /// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the /// virtual address space of the calling process. -#[derive(Clone, Debug, Default)] -pub struct GuestMemoryMmap { - regions: Vec>>, -} +pub type GuestMemoryMmap = GuestRegionCollection>; impl GuestMemoryMmap { - /// Creates an empty `GuestMemoryMmap` instance. - pub fn new() -> Self { - Self::default() - } - /// Creates a container and allocates anonymous memory for guest memory regions. /// /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. @@ -407,111 +375,6 @@ impl GuestMemoryMmap { } } -impl GuestMemoryMmap { - /// Creates a new `GuestMemoryMmap` from a vector of regions. - /// - /// # Arguments - /// - /// * `regions` - The vector of regions. - /// The regions shouldn't overlap and they should be sorted - /// by the starting address. - pub fn from_regions(mut regions: Vec>) -> result::Result { - Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) - } - - /// Creates a new `GuestMemoryMmap` from a vector of Arc regions. - /// - /// Similar to the constructor `from_regions()` as it returns a - /// `GuestMemoryMmap`. The need for this constructor is to provide a way for - /// consumer of this API to create a new `GuestMemoryMmap` based on existing - /// regions coming from an existing `GuestMemoryMmap` instance. - /// - /// # Arguments - /// - /// * `regions` - The vector of `Arc` regions. - /// The regions shouldn't overlap and they should be sorted - /// by the starting address. 
- pub fn from_arc_regions(regions: Vec>>) -> result::Result { - if regions.is_empty() { - return Err(Error::NoMemoryRegion); - } - - for window in regions.windows(2) { - let prev = &window[0]; - let next = &window[1]; - - if prev.start_addr() > next.start_addr() { - return Err(Error::UnsortedMemoryRegions); - } - - if prev.last_addr() >= next.start_addr() { - return Err(Error::MemoryRegionOverlap); - } - } - - Ok(Self { regions }) - } - - /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`. - /// - /// # Arguments - /// * `region`: the memory region to insert into the guest memory object. - pub fn insert_region( - &self, - region: Arc>, - ) -> result::Result, Error> { - let mut regions = self.regions.clone(); - regions.push(region); - regions.sort_by_key(|x| x.start_addr()); - - Self::from_arc_regions(regions) - } - - /// Remove a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap` - /// on success, together with the removed region. 
- /// - /// # Arguments - /// * `base`: base address of the region to be removed - /// * `size`: size of the region to be removed - pub fn remove_region( - &self, - base: GuestAddress, - size: GuestUsize, - ) -> result::Result<(GuestMemoryMmap, Arc>), Error> { - if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { - if self.regions.get(region_index).unwrap().mapping.size() as GuestUsize == size { - let mut regions = self.regions.clone(); - let region = regions.remove(region_index); - return Ok((Self { regions }, region)); - } - } - - Err(Error::InvalidGuestRegion) - } -} - -impl GuestMemory for GuestMemoryMmap { - type R = GuestRegionMmap; - - fn num_regions(&self) -> usize { - self.regions.len() - } - - fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap> { - let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) { - Ok(x) => Some(x), - // Within the closest region with starting address < addr - Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1), - _ => None, - }; - index.map(|x| self.regions[x].as_ref()) - } - - fn iter(&self) -> impl Iterator { - self.regions.iter().map(AsRef::as_ref) - } -} - #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] @@ -521,16 +384,17 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::GuestAddressSpace; + use crate::{Error, GuestAddressSpace, GuestMemory}; use std::io::Write; use std::mem; + use std::sync::Arc; #[cfg(feature = "rawfd")] use std::{fs::File, path::Path}; use vmm_sys_util::tempfile::TempFile; - type GuestMemoryMmap = super::GuestMemoryMmap<()>; type GuestRegionMmap = super::GuestRegionMmap<()>; + type GuestMemoryMmap = super::GuestRegionCollection; type MmapRegion = super::MmapRegion<()>; #[test] @@ -555,9 +419,8 @@ mod tests { } assert_eq!(guest_mem.last_addr(), last_addr); } - for ((region_addr, region_size), mmap) in 
expected_regions_summary - .iter() - .zip(guest_mem.regions.iter()) + for ((region_addr, region_size), mmap) in + expected_regions_summary.iter().zip(guest_mem.iter()) { assert_eq!(region_addr, &mmap.guest_base); assert_eq!(region_size, &mmap.mapping.size()); @@ -748,7 +611,7 @@ mod tests { let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)]; let guest_mem = GuestMemoryMmap::new(); - assert_eq!(guest_mem.regions.len(), 0); + assert_eq!(guest_mem.num_regions(), 0); check_guest_memory_mmap(new_guest_memory_mmap(®ions_summary), ®ions_summary); @@ -1086,8 +949,10 @@ mod tests { .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); - assert_eq!(gm.regions[0].guest_base, regions[0].0); - assert_eq!(gm.regions[1].guest_base, regions[1].0); + let mmap_regions = gm.iter().collect::>(); + + assert_eq!(mmap_regions[0].guest_base, regions[0].0); + assert_eq!(mmap_regions[1].guest_base, regions[1].0); } #[test] @@ -1115,8 +980,10 @@ mod tests { .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); - assert_eq!(gm.regions[0].guest_base, regions[0].0); - assert_eq!(gm.regions[1].guest_base, regions[1].0); + let mmap_regions = gm.iter().collect::>(); + + assert_eq!(mmap_regions[0].guest_base, regions[0].0); + assert_eq!(mmap_regions[1].guest_base, regions[1].0); } #[test] @@ -1225,11 +1092,13 @@ mod tests { assert_eq!(mem_orig.num_regions(), 2); assert_eq!(gm.num_regions(), 5); - assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000)); - assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000)); - assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000)); - assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000)); - assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000)); + let regions = gm.iter().collect::>(); + + assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); + assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); + 
assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); + assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); } #[test] @@ -1250,7 +1119,7 @@ mod tests { assert_eq!(mem_orig.num_regions(), 2); assert_eq!(gm.num_regions(), 1); - assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); } diff --git a/src/region.rs b/src/region.rs index c20dbea0..626802bf 100644 --- a/src/region.rs +++ b/src/region.rs @@ -4,8 +4,10 @@ use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::Error; use crate::guest_memory::Result; use crate::{ - Address, Bytes, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress, VolatileSlice, + Address, Bytes, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, + VolatileSlice, }; +use std::sync::Arc; /// Represents a continuous region of guest physical memory. #[allow(clippy::len_without_is_empty)] @@ -139,3 +141,161 @@ pub trait GuestMemoryRegion: Bytes { None } } + +/// Errors that can occur when dealing with [`GuestRegion`]s, or collections thereof +#[derive(Debug, thiserror::Error)] +pub enum GuestRegionError { + /// Adding the guest base address to the length of the underlying mapping resulted + /// in an overflow. + #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")] + #[cfg(feature = "backend-mmap")] + InvalidGuestRegion, + /// Error creating a `MmapRegion` object. + #[error("{0}")] + #[cfg(feature = "backend-mmap")] + MmapRegion(crate::mmap::MmapRegionError), + /// No memory region found. + #[error("No memory region found")] + NoMemoryRegion, + /// Some of the memory regions intersect with each other. + #[error("Some of the memory regions intersect with each other")] + MemoryRegionOverlap, + /// The provided memory regions haven't been sorted. 
+ #[error("The provided memory regions haven't been sorted")] + UnsortedMemoryRegions, +} + +/// [`GuestMemory`](trait.GuestMemory.html) implementation based on a homogeneous collection +/// of [`GuestMemoryRegion`] implementations. +/// +/// Represents a sorted set of non-overlapping physical guest memory regions. +#[derive(Debug)] +pub struct GuestRegionCollection { + regions: Vec>, +} + +impl Default for GuestRegionCollection { + fn default() -> Self { + Self { + regions: Vec::new(), + } + } +} + +impl Clone for GuestRegionCollection { + fn clone(&self) -> Self { + GuestRegionCollection { + regions: self.regions.iter().map(Arc::clone).collect(), + } + } +} + +impl GuestRegionCollection { + /// Creates an empty `GuestMemoryMmap` instance. + pub fn new() -> Self { + Self::default() + } + + /// Creates a new [`GuestRegionCollection`] from a vector of regions. + /// + /// # Arguments + /// + /// * `regions` - The vector of regions. + /// The regions shouldn't overlap, and they should be sorted + /// by the starting address. + pub fn from_regions(mut regions: Vec) -> std::result::Result { + Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) + } + + /// Creates a new [`GuestRegionCollection`] from a vector of Arc regions. + /// + /// Similar to the constructor `from_regions()` as it returns a + /// [`GuestRegionCollection`]. The need for this constructor is to provide a way for + /// consumer of this API to create a new [`GuestRegionCollection`] based on existing + /// regions coming from an existing [`GuestRegionCollection`] instance. + /// + /// # Arguments + /// + /// * `regions` - The vector of `Arc` regions. + /// The regions shouldn't overlap and they should be sorted + /// by the starting address. 
+ pub fn from_arc_regions(regions: Vec>) -> std::result::Result { + if regions.is_empty() { + return Err(GuestRegionError::NoMemoryRegion); + } + + for window in regions.windows(2) { + let prev = &window[0]; + let next = &window[1]; + + if prev.start_addr() > next.start_addr() { + return Err(GuestRegionError::UnsortedMemoryRegions); + } + + if prev.last_addr() >= next.start_addr() { + return Err(GuestRegionError::MemoryRegionOverlap); + } + } + + Ok(Self { regions }) + } + + /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`. + /// + /// # Arguments + /// * `region`: the memory region to insert into the guest memory object. + pub fn insert_region( + &self, + region: Arc, + ) -> std::result::Result, GuestRegionError> { + let mut regions = self.regions.clone(); + regions.push(region); + regions.sort_by_key(|x| x.start_addr()); + + Self::from_arc_regions(regions) + } + + /// Remove a region from the [`GuestRegionCollection`] object and return a new `GuestRegionCollection` + /// on success, together with the removed region. 
+ /// + /// # Arguments + /// * `base`: base address of the region to be removed + /// * `size`: size of the region to be removed + pub fn remove_region( + &self, + base: GuestAddress, + size: GuestUsize, + ) -> std::result::Result<(GuestRegionCollection, Arc), GuestRegionError> { + if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { + if self.regions.get(region_index).unwrap().len() == size { + let mut regions = self.regions.clone(); + let region = regions.remove(region_index); + return Ok((Self { regions }, region)); + } + } + + Err(GuestRegionError::NoMemoryRegion) + } +} + +impl GuestMemory for GuestRegionCollection { + type R = R; + + fn num_regions(&self) -> usize { + self.regions.len() + } + + fn find_region(&self, addr: GuestAddress) -> Option<&R> { + let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) { + Ok(x) => Some(x), + // Within the closest region with starting address < addr + Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1), + _ => None, + }; + index.map(|x| self.regions[x].as_ref()) + } + + fn iter(&self) -> impl Iterator { + self.regions.iter().map(AsRef::as_ref) + } +} From 8de505fef53671a023ed84802284bc399b6642b2 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Wed, 5 Feb 2025 13:25:13 +0000 Subject: [PATCH 4/7] refactor: use `matches!` instead of to_string() for tests Some tests that were explicitly testing for error conditions used converted errors to strings to determine whether two errors are the same (by saying they're only the same if their string representation was identical). Replace this with more roboust assertions on `matches!`. 
Signed-off-by: Patrick Roy --- src/mmap/mod.rs | 180 +++++++++++++---------------------------- src/mmap/unix.rs | 8 +- src/mmap/xen.rs | 30 +++---- src/volatile_memory.rs | 52 ------------ 4 files changed, 71 insertions(+), 199 deletions(-) diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index 07af5916..d45d7c96 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -384,7 +384,7 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::{Error, GuestAddressSpace, GuestMemory}; + use crate::{Error, GuestAddressSpace, GuestMemory, GuestMemoryError}; use std::io::Write; use std::mem; @@ -481,129 +481,66 @@ mod tests { fn test_no_memory_region() { let regions_summary = []; - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap(®ions_summary).err().unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_with_files(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_arc_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); + assert!(matches!( + new_guest_memory_mmap(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); } #[test] fn test_overlapping_memory_regions() { let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)]; - assert_eq!( - format!( - "{:?}", - 
new_guest_memory_mmap(®ions_summary).err().unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_with_files(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_arc_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); + assert!(matches!( + new_guest_memory_mmap(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); } #[test] fn test_unsorted_memory_regions() { let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)]; - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap(®ions_summary).err().unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_with_files(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_arc_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); + assert!(matches!( + new_guest_memory_mmap(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); + assert!(matches!( + 
new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); } #[test] @@ -828,18 +765,13 @@ mod tests { for gm in gm_list.iter() { let val1: u64 = 0xaa55_aa55_aa55_aa55; let val2: u64 = 0x55aa_55aa_55aa_55aa; - assert_eq!( - format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()), - format!("InvalidGuestAddress({:?})", bad_addr,) - ); - assert_eq!( - format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()), - format!( - "PartialBuffer {{ expected: {:?}, completed: {:?} }}", - mem::size_of::(), - max_addr.checked_offset_from(bad_addr2).unwrap() - ) - ); + assert!(matches!( + gm.write_obj(val1, bad_addr).unwrap_err(), + GuestMemoryError::InvalidGuestAddress(addr) if addr == bad_addr + )); + assert!(matches!( + gm.write_obj(val1, bad_addr2).unwrap_err(), + GuestMemoryError::PartialBuffer { expected, completed} if expected == size_of::() && completed == max_addr.checked_offset_from(bad_addr2).unwrap() as usize)); gm.write_obj(val1, GuestAddress(0x500)).unwrap(); gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap(); diff --git a/src/mmap/unix.rs b/src/mmap/unix.rs index 752f05f1..58b031a3 100644 --- a/src/mmap/unix.rs +++ b/src/mmap/unix.rs @@ -558,7 +558,7 @@ mod tests { prot, flags, ); - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); + assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); // Offset + size is greater than the size of the file (which is 0 at this point). let r = MmapRegion::build( @@ -567,7 +567,7 @@ mod tests { prot, flags, ); - assert_eq!(format!("{:?}", r.unwrap_err()), "MappingPastEof"); + assert!(matches!(r.unwrap_err(), Error::MappingPastEof)); // MAP_FIXED was specified among the flags. 
let r = MmapRegion::build( @@ -576,7 +576,7 @@ mod tests { prot, flags | libc::MAP_FIXED, ); - assert_eq!(format!("{:?}", r.unwrap_err()), "MapFixed"); + assert!(matches!(r.unwrap_err(), Error::MapFixed)); // Let's resize the file. assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); @@ -621,7 +621,7 @@ mod tests { let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; let r = unsafe { MmapRegion::build_raw((addr + 1) as *mut u8, size, prot, flags) }; - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidPointer"); + assert!(matches!(r.unwrap_err(), Error::InvalidPointer)); let r = unsafe { MmapRegion::build_raw(addr as *mut u8, size, prot, flags).unwrap() }; diff --git a/src/mmap/xen.rs b/src/mmap/xen.rs index 7c5c0670..590c34a1 100644 --- a/src/mmap/xen.rs +++ b/src/mmap/xen.rs @@ -1077,26 +1077,18 @@ mod tests { range.mmap_flags = 16; let r = MmapXen::new(&range); - assert_eq!( - format!("{:?}", r.unwrap_err()), - format!("MmapFlags({})", range.mmap_flags), - ); + assert!(matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == range.mmap_flags)); range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits(); let r = MmapXen::new(&range); - assert_eq!( - format!("{:?}", r.unwrap_err()), - format!("MmapFlags({:x})", MmapXenFlags::ALL.bits()), + assert!( + matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::ALL.bits()) ); range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(); let r = MmapXen::new(&range); - assert_eq!( - format!("{:?}", r.unwrap_err()), - format!( - "MmapFlags({:x})", - MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits(), - ), + assert!( + matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits()) ); } @@ -1132,17 +1124,17 @@ mod tests { range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)); range.prot = None; let r = MmapXenForeign::new(&range); - 
assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenForeign::new(&range); - assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenForeign::new(&range); - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); + assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); let mut range = MmapRange::initialized(true); range.size = 0; @@ -1164,7 +1156,7 @@ mod tests { let mut range = MmapRange::initialized(true); range.prot = None; let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); - assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.prot = None; @@ -1174,12 +1166,12 @@ mod tests { let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); + assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); let mut range = MmapRange::initialized(true); range.size = 0; diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs index 43c1d206..acff239b 100644 --- a/src/volatile_memory.rs +++ b/src/volatile_memory.rs @@ -1488,58 +1488,6 @@ mod tests { slice.compute_end_offset(6, 0).unwrap_err(); } - #[test] - 
fn test_display_error() { - assert_eq!( - format!("{}", Error::OutOfBounds { addr: 0x10 }), - "address 0x10 is out of bounds" - ); - - assert_eq!( - format!( - "{}", - Error::Overflow { - base: 0x0, - offset: 0x10 - } - ), - "address 0x0 offset by 0x10 would overflow" - ); - - assert_eq!( - format!( - "{}", - Error::TooBig { - nelements: 100_000, - size: 1_000_000_000 - } - ), - "100000 elements of size 1000000000 would overflow a usize" - ); - - assert_eq!( - format!( - "{}", - Error::Misaligned { - addr: 0x4, - alignment: 8 - } - ), - "address 0x4 is not aligned to 8" - ); - - assert_eq!( - format!( - "{}", - Error::PartialBuffer { - expected: 100, - completed: 90 - } - ), - "only used 90 bytes in 100 long buffer" - ); - } - #[test] fn misaligned_ref() { let mut a = [0u8; 3]; From 23d03f9cbb5fc47f0240e196415093595a4c2f90 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 08:58:32 +0000 Subject: [PATCH 5/7] Provide generic impl Bytes for R: GuestMemoryRegion This trait implementation of GuestMemoryRegion for `GuestRegionMmap`does not actually make use of the specifics of `GuestRegionMmap`, so can be completely generic in terms of `GuestMemoryRegion`. This allows us to move it to guest_memory.rs, eliminating one further instance of code depending on exactly one of mmap_unix.rs and mmap_xen.rs being compiled in. However, Paolo pointed out that sometimes this default impl might not be desired, for example QEMU might want some GuestMemoryRegion impl that represents PCI BARs. So hide this impl behind a marker trait, GuestMemoryRegionBytes, being implemented. Replace .unwrap() with error propagation via `?`, as we no longer know that we are dealing with a specific GuestMemoryRegion impl that always implements as_volatile_slice(). 
Signed-off-by: Patrick Roy --- CHANGELOG.md | 2 + src/mmap/mod.rs | 157 ++-------------------------------------------- src/region.rs | 162 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 163 insertions(+), 158 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff662a2f..4823eecd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ `write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait. - \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Give `GuestMemory::find_region` a default implementation, based on linear search. +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Implement `Bytes` generically + for all `R: GuestMemoryRegion`. ### Removed diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index d45d7c96..d9b75a7c 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -17,14 +17,13 @@ use std::borrow::Borrow; use std::io::{Seek, SeekFrom}; use std::ops::Deref; use std::result; -use std::sync::atomic::Ordering; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; -use crate::region::GuestMemoryRegion; +use crate::region::{GuestMemoryRegion, GuestMemoryRegionBytes}; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; -use crate::{AtomicAccess, Bytes, Error, GuestRegionCollection, ReadVolatile, WriteVolatile}; +use crate::{Error, GuestRegionCollection}; // re-export for backward compat, as the trait used to be defined in mmap.rs pub use crate::bitmap::NewBitmap; @@ -145,154 +144,6 @@ impl GuestRegionMmap { } } -impl Bytes for GuestRegionMmap { - type E = guest_memory::Error; - - /// # Examples - /// * Write a slice at guest address 0x1200. 
- /// - /// ``` - /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// let res = gm - /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) - /// .expect("Could not write to guest memory"); - /// assert_eq!(5, res); - /// ``` - fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .write(buf, maddr) - .map_err(Into::into) - } - - /// # Examples - /// * Read a slice of length 16 at guestaddress 0x1200. - /// - /// ``` - /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// let buf = &mut [0u8; 16]; - /// let res = gm - /// .read(buf, GuestAddress(0x1200)) - /// .expect("Could not read from guest memory"); - /// assert_eq!(16, res); - /// ``` - fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .read(buf, maddr) - .map_err(Into::into) - } - - fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .write_slice(buf, maddr) - .map_err(Into::into) - } - - fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .read_slice(buf, maddr) - .map_err(Into::into) - } - - fn read_volatile_from( - &self, - addr: MemoryRegionAddress, - src: &mut F, - count: usize, - ) -> Result - where - F: ReadVolatile, - { - 
self.as_volatile_slice() - .unwrap() - .read_volatile_from(addr.0 as usize, src, count) - .map_err(Into::into) - } - - fn read_exact_volatile_from( - &self, - addr: MemoryRegionAddress, - src: &mut F, - count: usize, - ) -> Result<(), Self::E> - where - F: ReadVolatile, - { - self.as_volatile_slice() - .unwrap() - .read_exact_volatile_from(addr.0 as usize, src, count) - .map_err(Into::into) - } - - fn write_volatile_to( - &self, - addr: MemoryRegionAddress, - dst: &mut F, - count: usize, - ) -> Result - where - F: WriteVolatile, - { - self.as_volatile_slice() - .unwrap() - .write_volatile_to(addr.0 as usize, dst, count) - .map_err(Into::into) - } - - fn write_all_volatile_to( - &self, - addr: MemoryRegionAddress, - dst: &mut F, - count: usize, - ) -> Result<(), Self::E> - where - F: WriteVolatile, - { - self.as_volatile_slice() - .unwrap() - .write_all_volatile_to(addr.0 as usize, dst, count) - .map_err(Into::into) - } - - fn store( - &self, - val: T, - addr: MemoryRegionAddress, - order: Ordering, - ) -> guest_memory::Result<()> { - self.as_volatile_slice().and_then(|s| { - s.store(val, addr.raw_value() as usize, order) - .map_err(Into::into) - }) - } - - fn load( - &self, - addr: MemoryRegionAddress, - order: Ordering, - ) -> guest_memory::Result { - self.as_volatile_slice() - .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) - } -} - impl GuestMemoryRegion for GuestRegionMmap { type B = B; @@ -339,6 +190,8 @@ impl GuestMemoryRegion for GuestRegionMmap { } } +impl GuestMemoryRegionBytes for GuestRegionMmap {} + /// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory /// in the current process. 
/// @@ -384,7 +237,7 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::{Error, GuestAddressSpace, GuestMemory, GuestMemoryError}; + use crate::{Bytes, Error, GuestAddressSpace, GuestMemory, GuestMemoryError}; use std::io::Write; use std::mem; diff --git a/src/region.rs b/src/region.rs index 626802bf..a531d403 100644 --- a/src/region.rs +++ b/src/region.rs @@ -1,17 +1,17 @@ //! Module containing abstracts for dealing with contiguous regions of guest memory use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::Error; use crate::guest_memory::Result; use crate::{ - Address, Bytes, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, - VolatileSlice, + Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, + GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile, }; +use std::sync::atomic::Ordering; use std::sync::Arc; /// Represents a continuous region of guest physical memory. #[allow(clippy::len_without_is_empty)] -pub trait GuestMemoryRegion: Bytes { +pub trait GuestMemoryRegion: Bytes { /// Type used for dirty memory tracking. type B: Bitmap; @@ -73,7 +73,7 @@ pub trait GuestMemoryRegion: Bytes { /// Rust memory safety model. It's the caller's responsibility to ensure that there's no /// concurrent accesses to the underlying guest memory. fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { - Err(Error::HostAddressNotAvailable) + Err(GuestMemoryError::HostAddressNotAvailable) } /// Returns information regarding the file and offset backing this memory region. @@ -89,7 +89,7 @@ pub trait GuestMemoryRegion: Bytes { offset: MemoryRegionAddress, count: usize, ) -> Result>> { - Err(Error::HostAddressNotAvailable) + Err(GuestMemoryError::HostAddressNotAvailable) } /// Gets a slice of memory for the entire region that supports volatile access. 
@@ -299,3 +299,153 @@ impl GuestMemory for GuestRegionCollection { self.regions.iter().map(AsRef::as_ref) } } + +/// A marker trait that if implemented on a type `R` makes available a default +/// implementation of `Bytes` for `R`, based on the assumption +/// that the entire `GuestMemoryRegion` is just traditional memory without any +/// special access requirements. +pub trait GuestMemoryRegionBytes: GuestMemoryRegion {} + +impl Bytes for R { + type E = GuestMemoryError; + + /// # Examples + /// * Write a slice at guest address 0x1200. + /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; + /// # + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # let start_addr = GuestAddress(0x1000); + /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) + /// # .expect("Could not create guest memory"); + /// # + /// let res = gm + /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) + /// .expect("Could not write to guest memory"); + /// assert_eq!(5, res); + /// # } + /// ``` + fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .write(buf, maddr) + .map_err(Into::into) + } + + /// # Examples + /// * Read a slice of length 16 at guestaddress 0x1200. 
+ /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; + /// # + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # let start_addr = GuestAddress(0x1000); + /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) + /// # .expect("Could not create guest memory"); + /// # + /// let buf = &mut [0u8; 16]; + /// let res = gm + /// .read(buf, GuestAddress(0x1200)) + /// .expect("Could not read from guest memory"); + /// assert_eq!(16, res); + /// # } + /// ``` + fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .read(buf, maddr) + .map_err(Into::into) + } + + fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .write_slice(buf, maddr) + .map_err(Into::into) + } + + fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .read_slice(buf, maddr) + .map_err(Into::into) + } + + fn read_volatile_from( + &self, + addr: MemoryRegionAddress, + src: &mut F, + count: usize, + ) -> Result + where + F: ReadVolatile, + { + self.as_volatile_slice()? + .read_volatile_from(addr.0 as usize, src, count) + .map_err(Into::into) + } + + fn read_exact_volatile_from( + &self, + addr: MemoryRegionAddress, + src: &mut F, + count: usize, + ) -> Result<()> + where + F: ReadVolatile, + { + self.as_volatile_slice()? + .read_exact_volatile_from(addr.0 as usize, src, count) + .map_err(Into::into) + } + + fn write_volatile_to( + &self, + addr: MemoryRegionAddress, + dst: &mut F, + count: usize, + ) -> Result + where + F: WriteVolatile, + { + self.as_volatile_slice()? 
+ .write_volatile_to(addr.0 as usize, dst, count) + .map_err(Into::into) + } + + fn write_all_volatile_to( + &self, + addr: MemoryRegionAddress, + dst: &mut F, + count: usize, + ) -> Result<()> + where + F: WriteVolatile, + { + self.as_volatile_slice()? + .write_all_volatile_to(addr.0 as usize, dst, count) + .map_err(Into::into) + } + + fn store( + &self, + val: T, + addr: MemoryRegionAddress, + order: Ordering, + ) -> Result<()> { + self.as_volatile_slice().and_then(|s| { + s.store(val, addr.raw_value() as usize, order) + .map_err(Into::into) + }) + } + + fn load(&self, addr: MemoryRegionAddress, order: Ordering) -> Result { + self.as_volatile_slice() + .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) + } +} From 8bd109112ab04bce588bc32a9fb3fa79617529bf Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 14:18:10 +0000 Subject: [PATCH 6/7] test: move GuestRegionCollection specific tests to region.rs Move some tests that are all about the invariants of GuestRegionCollection constructors to region.rs, where they can be run without the need for the backend-mmap feature (by instead using a mock memory region). While we're at it, fix these tests calling from_regions twice, but from_arc_regions never. Remove some test cases that are superfluous, because since the `regions` field of `GuestRegionCollection` is private, all construction needs to go through `from_regions`/`from_arc_regions`, and testing that wrappers around these functions uphold the invariants of the wrapped functions is not very useful. test_memory and create_vec_with_regions were the same test, so deduplicate while moving to region.rs. Generally, most of these tests could be moved to region.rs, given sufficient mocking of the memory region. 
I've somewhat arbitrarily drawn the line at "only transfer tests where the mock only needs GuestAddress and length", which roughly translates to "move tests where we are testing the default implementations of the GuestMemory and GuestMemoryRegion traits, which are not overwritten in the mmap-based implementations". Of course, we could write a mock that implements actual allocation of memory via std::alloc::alloc, but at that point we'd be testing the mock more than actual vm-memory code (and start loosing coverage of the mmap implementations). Signed-off-by: Patrick Roy --- src/mmap/mod.rs | 402 +----------------------------------------------- src/region.rs | 327 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 328 insertions(+), 401 deletions(-) diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index d9b75a7c..99729fe3 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -237,11 +237,10 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::{Bytes, Error, GuestAddressSpace, GuestMemory, GuestMemoryError}; + use crate::{Bytes, GuestMemory, GuestMemoryError}; use std::io::Write; use std::mem; - use std::sync::Arc; #[cfg(feature = "rawfd")] use std::{fs::File, path::Path}; use vmm_sys_util::tempfile::TempFile; @@ -256,171 +255,6 @@ mod tests { assert_eq!(1024, m.size()); } - fn check_guest_memory_mmap( - maybe_guest_mem: Result, - expected_regions_summary: &[(GuestAddress, usize)], - ) { - assert!(maybe_guest_mem.is_ok()); - - let guest_mem = maybe_guest_mem.unwrap(); - assert_eq!(guest_mem.num_regions(), expected_regions_summary.len()); - let maybe_last_mem_reg = expected_regions_summary.last(); - if let Some((region_addr, region_size)) = maybe_last_mem_reg { - let mut last_addr = region_addr.unchecked_add(*region_size as u64); - if last_addr.raw_value() != 0 { - last_addr = last_addr.unchecked_sub(1); - } - assert_eq!(guest_mem.last_addr(), last_addr); - } - for ((region_addr, region_size), mmap) 
in - expected_regions_summary.iter().zip(guest_mem.iter()) - { - assert_eq!(region_addr, &mmap.guest_base); - assert_eq!(region_size, &mmap.mapping.size()); - - assert!(guest_mem.find_region(*region_addr).is_some()); - } - } - - fn new_guest_memory_mmap( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - GuestMemoryMmap::from_ranges(regions_summary) - } - - fn new_guest_memory_mmap_from_regions( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - GuestMemoryMmap::from_regions( - regions_summary - .iter() - .map(|(region_addr, region_size)| { - GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap() - }) - .collect(), - ) - } - - fn new_guest_memory_mmap_from_arc_regions( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - GuestMemoryMmap::from_arc_regions( - regions_summary - .iter() - .map(|(region_addr, region_size)| { - Arc::new(GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap()) - }) - .collect(), - ) - } - - fn new_guest_memory_mmap_with_files( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - let regions: Vec<(GuestAddress, usize, Option)> = regions_summary - .iter() - .map(|(region_addr, region_size)| { - let f = TempFile::new().unwrap().into_file(); - f.set_len(*region_size as u64).unwrap(); - - (*region_addr, *region_size, Some(FileOffset::new(f, 0))) - }) - .collect(); - - GuestMemoryMmap::from_ranges_with_files(®ions) - } - - #[test] - fn test_no_memory_region() { - let regions_summary = []; - - assert!(matches!( - new_guest_memory_mmap(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - assert!(matches!( - new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - } - - #[test] - fn 
test_overlapping_memory_regions() { - let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)]; - - assert!(matches!( - new_guest_memory_mmap(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - assert!(matches!( - new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - } - - #[test] - fn test_unsorted_memory_regions() { - let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)]; - - assert!(matches!( - new_guest_memory_mmap(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - assert!(matches!( - new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - } - - #[test] - fn test_valid_memory_regions() { - let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)]; - - let guest_mem = GuestMemoryMmap::new(); - assert_eq!(guest_mem.num_regions(), 0); - - check_guest_memory_mmap(new_guest_memory_mmap(®ions_summary), ®ions_summary); - - check_guest_memory_mmap( - new_guest_memory_mmap_with_files(®ions_summary), - ®ions_summary, - ); - - check_guest_memory_mmap( - new_guest_memory_mmap_from_regions(®ions_summary), - ®ions_summary, - ); - - check_guest_memory_mmap( - new_guest_memory_mmap_from_arc_regions(®ions_summary), - ®ions_summary, - ); - } - #[test] fn slice_addr() { let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap(); @@ -446,64 +280,6 @@ mod tests { 
assert_eq!(buf[0..sample_buf.len()], sample_buf[..]); } - #[test] - fn test_address_in_range() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x400).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x400).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { - assert!(guest_mem.address_in_range(GuestAddress(0x200))); - assert!(!guest_mem.address_in_range(GuestAddress(0x600))); - assert!(guest_mem.address_in_range(GuestAddress(0xa00))); - assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); - } - } - - #[test] - fn test_check_address() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x400).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x400).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { - assert_eq!( - guest_mem.check_address(GuestAddress(0x200)), - Some(GuestAddress(0x200)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); - assert_eq!( - guest_mem.check_address(GuestAddress(0xa00)), - Some(GuestAddress(0xa00)) - ); - 
assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); - } - } - #[test] fn test_to_region_addr() { let f1 = TempFile::new().unwrap().into_file(); @@ -710,67 +486,6 @@ mod tests { } } - #[test] - fn create_vec_with_regions() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let mut iterated_regions = Vec::new(); - let gm = GuestMemoryMmap::from_ranges(®ions).unwrap(); - - for region in gm.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - - for region in gm.iter() { - iterated_regions.push((region.start_addr(), region.len() as usize)); - } - assert_eq!(regions, iterated_regions); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mmap_regions = gm.iter().collect::>(); - - assert_eq!(mmap_regions[0].guest_base, regions[0].0); - assert_eq!(mmap_regions[1].guest_base, regions[1].0); - } - - #[test] - fn test_memory() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let mut iterated_regions = Vec::new(); - let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); - let mem = gm.memory(); - - for region in mem.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - - for region in mem.iter() { - iterated_regions.push((region.start_addr(), region.len() as usize)); - } - assert_eq!(regions, iterated_regions); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mmap_regions = gm.iter().collect::>(); - - assert_eq!(mmap_regions[0].guest_base, regions[0].0); - assert_eq!(mmap_regions[1].guest_base, regions[1].0); - } - #[test] fn test_access_cross_boundary() { let f1 = TempFile::new().unwrap().into_file(); @@ -850,64 +565,6 @@ mod tests { assert_eq!(region.file_offset().unwrap().start(), offset); } - #[test] - fn test_mmap_insert_region() { - let region_size = 
0x1000; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); - let mem_orig = gm.memory(); - assert_eq!(mem_orig.num_regions(), 2); - - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0x8000), 0x1000, None).unwrap()); - let gm = gm.insert_region(mmap).unwrap(); - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap()); - let gm = gm.insert_region(mmap).unwrap(); - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap()); - let gm = gm.insert_region(mmap).unwrap(); - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap()); - gm.insert_region(mmap).unwrap_err(); - - assert_eq!(mem_orig.num_regions(), 2); - assert_eq!(gm.num_regions(), 5); - - let regions = gm.iter().collect::>(); - - assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); - assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); - assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); - assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); - assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); - } - - #[test] - fn test_mmap_remove_region() { - let region_size = 0x1000; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); - let mem_orig = gm.memory(); - assert_eq!(mem_orig.num_regions(), 2); - - gm.remove_region(GuestAddress(0), 128).unwrap_err(); - gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); - let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); - - assert_eq!(mem_orig.num_regions(), 2); - assert_eq!(gm.num_regions(), 1); - - assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); - assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); - } - #[test] 
fn test_guest_memory_mmap_get_slice() { let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap(); @@ -978,63 +635,6 @@ mod tests { assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err()); } - #[test] - fn test_checked_offset() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = GuestMemoryMmap::from_ranges(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - assert_eq!( - guest_mem.checked_offset(start_addr1, 0x200), - Some(GuestAddress(0x200)) - ); - assert_eq!( - guest_mem.checked_offset(start_addr1, 0xa00), - Some(GuestAddress(0xa00)) - ); - assert_eq!( - guest_mem.checked_offset(start_addr2, 0x7ff), - Some(GuestAddress(0xfff)) - ); - assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None); - assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None); - - assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None); - assert_eq!( - guest_mem.checked_offset(start_addr1, 0x400 - 1), - Some(GuestAddress(0x400 - 1)) - ); - } - - #[test] - fn test_check_range() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = GuestMemoryMmap::from_ranges(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - assert!(guest_mem.check_range(start_addr1, 0x0)); - assert!(guest_mem.check_range(start_addr1, 0x200)); - assert!(guest_mem.check_range(start_addr1, 0x400)); - assert!(!guest_mem.check_range(start_addr1, 0xa00)); - assert!(guest_mem.check_range(start_addr2, 0x7ff)); - assert!(guest_mem.check_range(start_addr2, 0x800)); - assert!(!guest_mem.check_range(start_addr2, 0x801)); - assert!(!guest_mem.check_range(start_addr2, 0xc00)); - assert!(!guest_mem.check_range(start_addr1, usize::MAX)); - } - #[test] fn test_atomic_accesses() { let region = 
GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap(); diff --git a/src/region.rs b/src/region.rs index a531d403..16bf16a5 100644 --- a/src/region.rs +++ b/src/region.rs @@ -449,3 +449,330 @@ impl Bytes for R { .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) } } + +#[cfg(test)] +mod tests { + use crate::region::{GuestMemoryRegionBytes, GuestRegionError}; + use crate::{ + Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize, + }; + use std::sync::Arc; + + #[derive(Debug, PartialEq, Eq)] + struct MockRegion { + start: GuestAddress, + len: GuestUsize, + } + + impl GuestMemoryRegion for MockRegion { + type B = (); + + fn len(&self) -> GuestUsize { + self.len + } + + fn start_addr(&self) -> GuestAddress { + self.start + } + + fn bitmap(&self) -> &Self::B { + &() + } + } + + impl GuestMemoryRegionBytes for MockRegion {} + + type Collection = GuestRegionCollection; + + fn check_guest_memory_mmap( + maybe_guest_mem: Result, + expected_regions_summary: &[(GuestAddress, u64)], + ) { + assert!(maybe_guest_mem.is_ok()); + + let guest_mem = maybe_guest_mem.unwrap(); + assert_eq!(guest_mem.num_regions(), expected_regions_summary.len()); + let maybe_last_mem_reg = expected_regions_summary.last(); + if let Some((region_addr, region_size)) = maybe_last_mem_reg { + let mut last_addr = region_addr.unchecked_add(*region_size); + if last_addr.raw_value() != 0 { + last_addr = last_addr.unchecked_sub(1); + } + assert_eq!(guest_mem.last_addr(), last_addr); + } + for ((region_addr, region_size), mmap) in + expected_regions_summary.iter().zip(guest_mem.iter()) + { + assert_eq!(region_addr, &mmap.start); + assert_eq!(region_size, &mmap.len); + + assert!(guest_mem.find_region(*region_addr).is_some()); + } + } + + fn new_guest_memory_collection_from_regions( + regions_summary: &[(GuestAddress, u64)], + ) -> Result { + Collection::from_regions( + regions_summary + .iter() + .map(|&(start, len)| MockRegion { start, len 
}) + .collect(), + ) + } + + fn new_guest_memory_collection_from_arc_regions( + regions_summary: &[(GuestAddress, u64)], + ) -> Result<Collection, GuestRegionError> { + Collection::from_arc_regions( + regions_summary + .iter() + .map(|&(start, len)| Arc::new(MockRegion { start, len })) + .collect(), + ) + } + + #[test] + fn test_no_memory_region() { + let regions_summary = []; + + assert!(matches!( + new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(), + GuestRegionError::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(), + GuestRegionError::NoMemoryRegion + )); + } + + #[test] + fn test_overlapping_memory_regions() { + let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)]; + + assert!(matches!( + new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(), + GuestRegionError::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(), + GuestRegionError::MemoryRegionOverlap + )); + } + + #[test] + fn test_unsorted_memory_regions() { + let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)]; + + assert!(matches!( + new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(), + GuestRegionError::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(), + GuestRegionError::UnsortedMemoryRegions + )); + } + + #[test] + fn test_valid_memory_regions() { + let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)]; + + let guest_mem = Collection::new(); + assert_eq!(guest_mem.num_regions(), 0); + + check_guest_memory_mmap( + new_guest_memory_collection_from_regions(&regions_summary), + &regions_summary, + ); + + check_guest_memory_mmap( + new_guest_memory_collection_from_arc_regions(&regions_summary), + &regions_summary, + ); + } + + #[test] + fn test_mmap_insert_region() { + let region_size = 0x1000; + let regions = vec![ +
(GuestAddress(0x0), region_size), + (GuestAddress(0x10_0000), region_size), + ]; + let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap(); + let mut gm = mem_orig.clone(); + assert_eq!(mem_orig.num_regions(), 2); + + let new_regions = [ + (GuestAddress(0x8000), 0x1000), + (GuestAddress(0x4000), 0x1000), + (GuestAddress(0xc000), 0x1000), + ]; + + for (start, len) in new_regions { + gm = gm + .insert_region(Arc::new(MockRegion { start, len })) + .unwrap(); + } + + gm.insert_region(Arc::new(MockRegion { + start: GuestAddress(0xc000), + len: 0x1000, + })) + .unwrap_err(); + + assert_eq!(mem_orig.num_regions(), 2); + assert_eq!(gm.num_regions(), 5); + + let regions = gm.iter().collect::<Vec<_>>(); + + assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); + assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); + assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); + assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); + } + + #[test] + fn test_mmap_remove_region() { + let region_size = 0x1000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10_0000), region_size), + ]; + let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap(); + let gm = mem_orig.clone(); + assert_eq!(mem_orig.num_regions(), 2); + + gm.remove_region(GuestAddress(0), 128).unwrap_err(); + gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); + let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); + + assert_eq!(mem_orig.num_regions(), 2); + assert_eq!(gm.num_regions(), 1); + + assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); + assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); + } + + #[test] + fn test_iter() { + let region_size = 0x400; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x1000), region_size), + ]; + let mut iterated_regions = Vec::new(); + let gm =
new_guest_memory_collection_from_regions(&regions).unwrap(); + + for region in gm.iter() { + assert_eq!(region.len(), region_size as GuestUsize); + } + + for region in gm.iter() { + iterated_regions.push((region.start_addr(), region.len())); + } + assert_eq!(regions, iterated_regions); + + assert!(regions + .iter() + .map(|x| (x.0, x.1)) + .eq(iterated_regions.iter().copied())); + + let mmap_regions = gm.iter().collect::<Vec<_>>(); + + assert_eq!(mmap_regions[0].start, regions[0].0); + assert_eq!(mmap_regions[1].start, regions[1].0); + } + + #[test] + fn test_address_in_range() { + let start_addr1 = GuestAddress(0x0); + let start_addr2 = GuestAddress(0x800); + let guest_mem = + new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)]) + .unwrap(); + + assert!(guest_mem.address_in_range(GuestAddress(0x200))); + assert!(!guest_mem.address_in_range(GuestAddress(0x600))); + assert!(guest_mem.address_in_range(GuestAddress(0xa00))); + assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); + } + + #[test] + fn test_check_address() { + let start_addr1 = GuestAddress(0x0); + let start_addr2 = GuestAddress(0x800); + let guest_mem = + new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)]) + .unwrap(); + + assert_eq!( + guest_mem.check_address(GuestAddress(0x200)), + Some(GuestAddress(0x200)) + ); + assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); + assert_eq!( + guest_mem.check_address(GuestAddress(0xa00)), + Some(GuestAddress(0xa00)) + ); + assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); + } + + #[test] + fn test_checked_offset() { + let start_addr1 = GuestAddress(0); + let start_addr2 = GuestAddress(0x800); + let start_addr3 = GuestAddress(0xc00); + let guest_mem = new_guest_memory_collection_from_regions(&[ + (start_addr1, 0x400), + (start_addr2, 0x400), + (start_addr3, 0x400), + ]) + .unwrap(); + + assert_eq!( + guest_mem.checked_offset(start_addr1, 0x200), + Some(GuestAddress(0x200))
+ ); + assert_eq!( + guest_mem.checked_offset(start_addr1, 0xa00), + Some(GuestAddress(0xa00)) + ); + assert_eq!( + guest_mem.checked_offset(start_addr2, 0x7ff), + Some(GuestAddress(0xfff)) + ); + assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None); + assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None); + + assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None); + assert_eq!( + guest_mem.checked_offset(start_addr1, 0x400 - 1), + Some(GuestAddress(0x400 - 1)) + ); + } + + #[test] + fn test_check_range() { + let start_addr1 = GuestAddress(0); + let start_addr2 = GuestAddress(0x800); + let start_addr3 = GuestAddress(0xc00); + let guest_mem = new_guest_memory_collection_from_regions(&[ + (start_addr1, 0x400), + (start_addr2, 0x400), + (start_addr3, 0x400), + ]) + .unwrap(); + + assert!(guest_mem.check_range(start_addr1, 0x0)); + assert!(guest_mem.check_range(start_addr1, 0x200)); + assert!(guest_mem.check_range(start_addr1, 0x400)); + assert!(!guest_mem.check_range(start_addr1, 0xa00)); + assert!(guest_mem.check_range(start_addr2, 0x7ff)); + assert!(guest_mem.check_range(start_addr2, 0x800)); + assert!(!guest_mem.check_range(start_addr2, 0x801)); + assert!(!guest_mem.check_range(start_addr2, 0xc00)); + assert!(!guest_mem.check_range(start_addr1, usize::MAX)); + } +} From 04b4509b42c6e5c1d4c639884101cbff86c7fc35 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 16:55:27 +0000 Subject: [PATCH 7/7] refactor(test): Use mock regions in atomic.rs tests These tests only test properties of the GuestMemoryAtomic implementation that are independent of the actual M: GuestMemory being used. So simplify it to drop the dependency on backend-mmap. 
Signed-off-by: Patrick Roy --- src/atomic.rs | 64 +++++++++++++++++++++++++++------------------ src/region.rs | 12 +++++----- 2 files changed, 40 insertions(+), 36 deletions(-) diff --git a/src/atomic.rs b/src/atomic.rs index 4b20b2c4..22697d05 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -140,14 +140,12 @@ impl GuestMemoryExclusiveGuard<'_, M> { } #[cfg(test)] -#[cfg(feature = "backend-mmap")] mod tests { use super::*; - use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion}; + use crate::region::tests::{new_guest_memory_collection_from_regions, Collection, MockRegion}; + use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize}; - type GuestMemoryMmap = crate::GuestMemoryMmap<()>; - type GuestRegionMmap = crate::GuestRegionMmap<()>; - type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>; + type GuestMemoryMmapAtomic = GuestMemoryAtomic<Collection>; #[test] fn test_atomic_memory() { @@ -157,7 +155,7 @@ mod tests { (GuestAddress(0x1000), region_size), ]; let mut iterated_regions = Vec::new(); - let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap(); + let gmm = new_guest_memory_collection_from_regions(&regions).unwrap(); let gm = GuestMemoryMmapAtomic::new(gmm); let mem = gm.memory(); @@ -166,7 +164,7 @@ mod tests { } for region in mem.iter() { - iterated_regions.push((region.start_addr(), region.len() as usize)); + iterated_regions.push((region.start_addr(), region.len())); } assert_eq!(regions, iterated_regions); assert_eq!(mem.num_regions(), 2); @@ -207,7 +205,7 @@ mod tests { (GuestAddress(0x0), region_size), (GuestAddress(0x1000), region_size), ]; - let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap(); + let gmm = new_guest_memory_collection_from_regions(&regions).unwrap(); let gm = GuestMemoryMmapAtomic::new(gmm); let mem = { let guard1 = gm.memory(); @@ -219,11 +217,11 @@ mod tests { #[test] fn test_atomic_hotplug() { let region_size = 0x1000; - let regions = vec![ + let regions = [ + (GuestAddress(0x0), region_size),
(GuestAddress(0x10_0000), region_size), ]; - let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); + let mut gmm = Arc::new(new_guest_memory_collection_from_regions(®ions).unwrap()); let gm: GuestMemoryAtomic<_> = gmm.clone().into(); let mem_orig = gm.memory(); assert_eq!(mem_orig.num_regions(), 2); @@ -231,26 +229,32 @@ mod tests { { let guard = gm.lock().unwrap(); let new_gmm = Arc::make_mut(&mut gmm); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000)) - .unwrap(), - ); - let new_gmm = new_gmm.insert_region(mmap).unwrap(); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000)) - .unwrap(), - ); - let new_gmm = new_gmm.insert_region(mmap).unwrap(); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000)) - .unwrap(), - ); - let new_gmm = new_gmm.insert_region(mmap).unwrap(); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000)) - .unwrap(), - ); - new_gmm.insert_region(mmap).unwrap_err(); + let new_gmm = new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0x8000), + len: 0x1000, + })) + .unwrap(); + let new_gmm = new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0x4000), + len: 0x1000, + })) + .unwrap(); + let new_gmm = new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0xc000), + len: 0x1000, + })) + .unwrap(); + + new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0x8000), + len: 0x1000, + })) + .unwrap_err(); + guard.replace(new_gmm); } diff --git a/src/region.rs b/src/region.rs index 16bf16a5..a6395d37 100644 --- a/src/region.rs +++ b/src/region.rs @@ -451,7 +451,7 @@ impl Bytes for R { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::region::{GuestMemoryRegionBytes, GuestRegionError}; use crate::{ Address, GuestAddress, GuestMemory, GuestMemoryRegion, 
GuestRegionCollection, GuestUsize, @@ -459,9 +459,9 @@ mod tests { use std::sync::Arc; #[derive(Debug, PartialEq, Eq)] - struct MockRegion { - start: GuestAddress, - len: GuestUsize, + pub(crate) struct MockRegion { + pub(crate) start: GuestAddress, + pub(crate) len: GuestUsize, } impl GuestMemoryRegion for MockRegion { @@ -482,7 +482,7 @@ mod tests { impl GuestMemoryRegionBytes for MockRegion {} - type Collection = GuestRegionCollection; + pub(crate) type Collection = GuestRegionCollection; fn check_guest_memory_mmap( maybe_guest_mem: Result, @@ -510,7 +510,7 @@ mod tests { } } - fn new_guest_memory_collection_from_regions( + pub(crate) fn new_guest_memory_collection_from_regions( regions_summary: &[(GuestAddress, u64)], ) -> Result { Collection::from_regions(