Skip to content

Generify parts of mmap.rs #312

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,17 @@
### Added

- \[[#311](https://github.com/rust-vmm/vm-memory/pull/311)\] Allow compiling without the ReadVolatile and WriteVolatile implementations
- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] `GuestRegionContainer`, a generic container of `GuestMemoryRegion`s, generalizing `GuestMemoryMmap` (which
is now a type alias for `GuestRegionContainer<GuestRegionMmap>`)

### Changed

- \[[#307](https://github.com/rust-vmm/vm-memory/pull/307)\] Move `read_volatile_from`, `read_exact_volatile_from`,
`write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait.
- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] Give `GuestMemory::find_region` a default implementation,
based on linear search.
- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] Implement `Bytes<MemoryRegionAddress>` generically
for all `R: GuestMemoryRegion`.

### Removed

Expand Down
64 changes: 34 additions & 30 deletions src/atomic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,14 +140,12 @@ impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
}

#[cfg(test)]
#[cfg(feature = "backend-mmap")]
mod tests {
use super::*;
use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion};
use crate::region::tests::{new_guest_memory_collection_from_regions, Collection, MockRegion};
use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize};

type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
type GuestRegionMmap = crate::GuestRegionMmap<()>;
type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>;
type GuestMemoryMmapAtomic = GuestMemoryAtomic<Collection>;

#[test]
fn test_atomic_memory() {
Expand All @@ -157,7 +155,7 @@ mod tests {
(GuestAddress(0x1000), region_size),
];
let mut iterated_regions = Vec::new();
let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
let gmm = new_guest_memory_collection_from_regions(&regions).unwrap();
let gm = GuestMemoryMmapAtomic::new(gmm);
let mem = gm.memory();

Expand All @@ -166,7 +164,7 @@ mod tests {
}

for region in mem.iter() {
iterated_regions.push((region.start_addr(), region.len() as usize));
iterated_regions.push((region.start_addr(), region.len()));
}
assert_eq!(regions, iterated_regions);
assert_eq!(mem.num_regions(), 2);
Expand Down Expand Up @@ -207,7 +205,7 @@ mod tests {
(GuestAddress(0x0), region_size),
(GuestAddress(0x1000), region_size),
];
let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
let gmm = new_guest_memory_collection_from_regions(&regions).unwrap();
let gm = GuestMemoryMmapAtomic::new(gmm);
let mem = {
let guard1 = gm.memory();
Expand All @@ -219,38 +217,44 @@ mod tests {
#[test]
fn test_atomic_hotplug() {
let region_size = 0x1000;
let regions = vec![
let regions = [
(GuestAddress(0x0), region_size),
(GuestAddress(0x10_0000), region_size),
];
let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
let mut gmm = Arc::new(new_guest_memory_collection_from_regions(&regions).unwrap());
let gm: GuestMemoryAtomic<_> = gmm.clone().into();
let mem_orig = gm.memory();
assert_eq!(mem_orig.num_regions(), 2);

{
let guard = gm.lock().unwrap();
let new_gmm = Arc::make_mut(&mut gmm);
let mmap = Arc::new(
GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000))
.unwrap(),
);
let new_gmm = new_gmm.insert_region(mmap).unwrap();
let mmap = Arc::new(
GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000))
.unwrap(),
);
let new_gmm = new_gmm.insert_region(mmap).unwrap();
let mmap = Arc::new(
GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
.unwrap(),
);
let new_gmm = new_gmm.insert_region(mmap).unwrap();
let mmap = Arc::new(
GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
.unwrap(),
);
new_gmm.insert_region(mmap).unwrap_err();
let new_gmm = new_gmm
.insert_region(Arc::new(MockRegion {
start: GuestAddress(0x8000),
len: 0x1000,
}))
.unwrap();
let new_gmm = new_gmm
.insert_region(Arc::new(MockRegion {
start: GuestAddress(0x4000),
len: 0x1000,
}))
.unwrap();
let new_gmm = new_gmm
.insert_region(Arc::new(MockRegion {
start: GuestAddress(0xc000),
len: 0x1000,
}))
.unwrap();

new_gmm
.insert_region(Arc::new(MockRegion {
start: GuestAddress(0x8000),
len: 0x1000,
}))
.unwrap_err();

guard.replace(new_gmm);
}

Expand Down
141 changes: 6 additions & 135 deletions src/guest_memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,11 @@ use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::address::{Address, AddressValue};
use crate::bitmap::{Bitmap, BS, MS};
use crate::bitmap::MS;
use crate::bytes::{AtomicAccess, Bytes};
use crate::io::{ReadVolatile, WriteVolatile};
use crate::volatile_memory::{self, VolatileSlice};
use crate::GuestMemoryRegion;

/// Errors associated with handling guest memory accesses.
#[allow(missing_docs)]
Expand Down Expand Up @@ -158,139 +159,6 @@ impl FileOffset {
}
}

/// Represents a continuous region of guest physical memory.
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
    /// Type used for dirty memory tracking.
    type B: Bitmap;

    /// Returns the size of the region, in bytes.
    fn len(&self) -> GuestUsize;

    /// Returns the minimum (inclusive) address managed by the region.
    fn start_addr(&self) -> GuestAddress;

    /// Returns the maximum (inclusive) address managed by the region.
    fn last_addr(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.start_addr().unchecked_add(self.len() - 1)
    }

    /// Borrow the associated `Bitmap` object.
    fn bitmap(&self) -> &Self::B;

    /// Returns the given address back if it falls within this region, `None` otherwise.
    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
        match self.address_in_range(addr) {
            true => Some(addr),
            false => None,
        }
    }

    /// Returns `true` if the given (region-relative) address is within this region.
    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
        addr.raw_value() < self.len()
    }

    /// Returns `base + offset` if the resulting address is in this region, `None` otherwise
    /// (including on arithmetic overflow).
    fn checked_offset(
        &self,
        base: MemoryRegionAddress,
        offset: usize,
    ) -> Option<MemoryRegionAddress> {
        let candidate = base.checked_add(offset as u64)?;
        self.check_address(candidate)
    }

    /// Tries to convert an absolute address to a relative address within this region.
    ///
    /// Returns `None` if `addr` is out of the bounds of this region.
    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
        let offset = addr.checked_offset_from(self.start_addr())?;
        self.check_address(MemoryRegionAddress(offset))
    }

    /// Returns the host virtual address corresponding to the region address.
    ///
    /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
    /// have the capability to mmap guest address range into host virtual address space for
    /// direct access, so the corresponding host virtual address may be passed to other subsystems.
    ///
    /// # Note
    /// The underlying guest memory is not protected from memory aliasing, which breaks the
    /// Rust memory safety model. It's the caller's responsibility to ensure that there's no
    /// concurrent accesses to the underlying guest memory.
    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
        // Default: no host mapping is available; backends that mmap memory override this.
        Err(Error::HostAddressNotAvailable)
    }

    /// Returns information regarding the file and offset backing this memory region, if any.
    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
    /// `offset`.
    #[allow(unused_variables)]
    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> Result<VolatileSlice<BS<Self::B>>> {
        // Default: no directly-addressable backing memory; backends override this.
        Err(Error::HostAddressNotAvailable)
    }

    /// Gets a slice of memory for the entire region that supports volatile access.
    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
    /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
    /// #
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
    ///     .expect("Could not create guest memory");
    /// let slice = region
    ///     .as_volatile_slice()
    ///     .expect("Could not get volatile slice");
    ///
    /// let v = 42u32;
    /// let r = slice
    ///     .get_ref::<u32>(0x200)
    ///     .expect("Could not get reference");
    /// r.store(v);
    /// assert_eq!(r.load(), v);
    /// # }
    /// ```
    fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
    }

    /// Show if the region is based on the `HugeTLBFS`.
    /// Returns Some(true) if the region is backed by hugetlbfs.
    /// None represents that no information is available.
    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
    /// let addr = GuestAddress(0x1000);
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
    /// let r = mem.find_region(addr).unwrap();
    /// assert_eq!(r.is_hugetlbfs(), None);
    /// # }
    /// ```
    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}

/// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object.
/// The vm-memory crate already provides trivial implementation for
/// references to `GuestMemory` or reference-counted `GuestMemory` objects,
Expand Down Expand Up @@ -408,7 +276,10 @@ pub trait GuestMemory {
fn num_regions(&self) -> usize;

/// Returns the region containing the specified address or `None`.
fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
/// Returns the region containing the specified address or `None`.
///
/// Default implementation: a linear scan over all regions. Implementations
/// holding many regions may override this with a faster lookup.
fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
    self.iter()
        .find(|region| (region.start_addr()..=region.last_addr()).contains(&addr))
}

/// Gets an iterator over the entries in the collection.
///
Expand Down
7 changes: 5 additions & 2 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,17 +47,20 @@ pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};
pub mod guest_memory;
pub use guest_memory::{
Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory,
GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
};

pub mod region;
pub use region::{GuestMemoryRegion, GuestRegionCollection, GuestRegionError as Error};

pub mod io;
pub use io::{ReadVolatile, WriteVolatile};

#[cfg(feature = "backend-mmap")]
pub mod mmap;

#[cfg(feature = "backend-mmap")]
pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapRegion};
#[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))]
pub use mmap::{MmapRange, MmapXenFlags};

Expand Down
Loading