diff --git a/Cargo.lock b/Cargo.lock index 8c3a8503ba7..817f1435160 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -814,6 +814,7 @@ dependencies = [ "utils 0.1.0", "versionize 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "versionize_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "virtio_gen 0.1.0", "vm-memory 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/src/api_server/src/parsed_request.rs b/src/api_server/src/parsed_request.rs index e12113c599c..39f3e1282a0 100644 --- a/src/api_server/src/parsed_request.rs +++ b/src/api_server/src/parsed_request.rs @@ -614,7 +614,7 @@ pub(crate) mod tests { .write_all( b"PUT /drives/string HTTP/1.1\r\n\ Content-Type: application/json\r\n\ - Content-Length: 266\r\n\r\n{ \ + Content-Length: 287\r\n\r\n{ \ \"drive_id\": \"string\", \ \"path_on_host\": \"string\", \ \"is_root_device\": true, \ @@ -631,7 +631,8 @@ pub(crate) mod tests { \"one_time_burst\": 0, \ \"refill_time\": 0 \ } \ - } \ + }, \ + \"want_flush\": false \ }", ) .unwrap(); diff --git a/src/api_server/src/request/drive.rs b/src/api_server/src/request/drive.rs index 7a86c623893..b9010207706 100644 --- a/src/api_server/src/request/drive.rs +++ b/src/api_server/src/request/drive.rs @@ -249,7 +249,8 @@ mod tests { "one_time_burst": 0, "refill_time": 0 } - } + }, + "want_flush": false }"#; assert!(parse_put_drive(&Body::new(body), Some(&"1000")).is_ok()); diff --git a/src/api_server/swagger/firecracker.yaml b/src/api_server/swagger/firecracker.yaml index b284c304758..5cd12a540ec 100644 --- a/src/api_server/swagger/firecracker.yaml +++ b/src/api_server/swagger/firecracker.yaml @@ -556,6 +556,9 @@ definitions: description: Host level path for the guest drive rate_limiter: $ref: "#/definitions/RateLimiter" + want_flush: + type: boolean + description: advertise synchronous virtio flush capability Error: type: object diff --git a/src/devices/src/virtio/block/device.rs 
b/src/devices/src/virtio/block/device.rs index 711283c24d2..4e39425d4e2 100644 --- a/src/devices/src/virtio/block/device.rs +++ b/src/devices/src/virtio/block/device.rs @@ -159,10 +159,15 @@ impl Block { is_disk_read_only: bool, is_disk_root: bool, rate_limiter: RateLimiter, + want_flush: bool, ) -> io::Result { let disk_properties = DiskProperties::new(disk_image_path, is_disk_read_only)?; - let mut avail_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_BLK_F_FLUSH); + let mut avail_features = 1u64 << VIRTIO_F_VERSION_1; + + if want_flush { + avail_features |= 1u64 << VIRTIO_BLK_F_FLUSH; + }; if is_disk_read_only { avail_features |= 1u64 << VIRTIO_BLK_F_RO; @@ -443,7 +448,8 @@ pub(crate) mod tests { use crate::check_metric_after_block; use crate::virtio::block::test_utils::{ - default_block, invoke_handler_for_queue_event, set_queue, set_rate_limiter, + default_block, default_block_flush, invoke_handler_for_queue_event, set_queue, + set_rate_limiter, }; use crate::virtio::test_utils::{default_mem, initialize_virtqueue, VirtQueue}; @@ -476,7 +482,7 @@ pub(crate) mod tests { assert_eq!(block.device_type(), TYPE_BLOCK); - let features: u64 = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_BLK_F_FLUSH); + let features: u64 = 1u64 << VIRTIO_F_VERSION_1; assert_eq!(block.avail_features_by_page(0), features as u32); assert_eq!(block.avail_features_by_page(1), (features >> 32) as u32); @@ -714,8 +720,10 @@ pub(crate) mod tests { } } + // Verify injected flush request does not hang when feature not supported #[test] - fn test_flush() { + fn test_flush_default_nohang() { + // default block device has flush disabled let mut block = default_block(); let mem = default_mem(); let vq = VirtQueue::new(GuestAddress(0), &mem, 16); @@ -757,6 +765,49 @@ pub(crate) mod tests { } } + // Verify injected flush request does not hang when feature enabled + #[test] + fn test_flush_enabled_nohang() { + let mut block = default_block_flush(); + let mem = default_mem(); + let vq = 
VirtQueue::new(GuestAddress(0), &mem, 16); + set_queue(&mut block, 0, vq.create_queue()); + block.activate(mem.clone()).unwrap(); + initialize_virtqueue(&vq); + + let request_type_addr = GuestAddress(vq.dtable[0].addr.get()); + let status_addr = GuestAddress(vq.dtable[2].addr.get()); + + // Flush completes successfully without a data descriptor. + { + vq.dtable[0].next.set(2); + + mem.write_obj::(VIRTIO_BLK_T_FLUSH, request_type_addr) + .unwrap(); + + invoke_handler_for_queue_event(&mut block); + assert_eq!(vq.used.idx.get(), 1); + assert_eq!(vq.used.ring[0].get().id, 0); + assert_eq!(vq.used.ring[0].get().len, 0); + assert_eq!(mem.read_obj::(status_addr).unwrap(), VIRTIO_BLK_S_OK); + } + + // Flush completes successfully even with a data descriptor. + { + vq.used.idx.set(0); + set_queue(&mut block, 0, vq.create_queue()); + vq.dtable[0].next.set(1); + + mem.write_obj::(VIRTIO_BLK_T_FLUSH, request_type_addr) + .unwrap(); + + invoke_handler_for_queue_event(&mut block); + assert_eq!(vq.used.idx.get(), 1); + assert_eq!(vq.used.ring[0].get().id, 0); + assert_eq!(vq.used.ring[0].get().len, 0); + assert_eq!(mem.read_obj::(status_addr).unwrap(), VIRTIO_BLK_S_OK); + } + } #[test] fn test_get_device_id() { let mut block = default_block(); diff --git a/src/devices/src/virtio/block/persist.rs b/src/devices/src/virtio/block/persist.rs index 04c0d8acb5c..4db004b453d 100644 --- a/src/devices/src/virtio/block/persist.rs +++ b/src/devices/src/virtio/block/persist.rs @@ -11,7 +11,7 @@ use rate_limiter::{persist::RateLimiterState, RateLimiter}; use snapshot::Persist; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; -use virtio_gen::virtio_blk::VIRTIO_BLK_F_RO; +use virtio_gen::virtio_blk::{VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_RO}; use vm_memory::GuestMemoryMmap; use super::*; @@ -46,6 +46,7 @@ impl Persist<'_> for Block { disk_path: self.disk.file_path().clone(), virtio_state: VirtioDeviceState::from_device(self), rate_limiter_state: 
self.rate_limiter.save(), + // has_flush will come from advertised features } } @@ -55,6 +56,7 @@ impl Persist<'_> for Block { ) -> Result { let is_disk_read_only = state.virtio_state.avail_features & (1u64 << VIRTIO_BLK_F_RO) != 0; let rate_limiter = RateLimiter::restore((), &state.rate_limiter_state)?; + let has_flush = state.virtio_state.avail_features & (1u64 << VIRTIO_BLK_F_FLUSH) != 0; let mut block = Block::new( state.id.clone(), @@ -63,6 +65,7 @@ impl Persist<'_> for Block { is_disk_read_only, state.root_device, rate_limiter, + has_flush, )?; block.queues = state @@ -104,6 +107,7 @@ mod tests { false, false, RateLimiter::default(), + false, ) .unwrap(); let guest_mem = default_mem(); diff --git a/src/devices/src/virtio/block/request.rs b/src/devices/src/virtio/block/request.rs index 9814b0f536e..6e82cabeeee 100644 --- a/src/devices/src/virtio/block/request.rs +++ b/src/devices/src/virtio/block/request.rs @@ -22,6 +22,7 @@ use super::{Error, SECTOR_SHIFT, SECTOR_SIZE}; pub enum ExecuteError { BadRequest(Error), Flush(io::Error), + SyncAll(io::Error), Read(GuestMemoryError), Seek(io::Error), Write(GuestMemoryError), @@ -33,6 +34,7 @@ impl ExecuteError { match *self { ExecuteError::BadRequest(_) => VIRTIO_BLK_S_IOERR, ExecuteError::Flush(_) => VIRTIO_BLK_S_IOERR, + ExecuteError::SyncAll(_) => VIRTIO_BLK_S_IOERR, ExecuteError::Read(_) => VIRTIO_BLK_S_IOERR, ExecuteError::Seek(_) => VIRTIO_BLK_S_IOERR, ExecuteError::Write(_) => VIRTIO_BLK_S_IOERR, @@ -227,13 +229,13 @@ impl Request { METRICS.block.write_bytes.add(self.data_len as usize); METRICS.block.write_count.inc(); } - RequestType::Flush => match diskfile.flush() { - Ok(_) => { - METRICS.block.flush_count.inc(); - return Ok(0); - } - Err(e) => return Err(ExecuteError::Flush(e)), - }, + RequestType::Flush => { + // flush() first to force any cached data out + diskfile.flush().map_err(ExecuteError::Flush)?; + // sync data out to physical media on host + diskfile.sync_all().map_err(ExecuteError::SyncAll)?; + 
METRICS.block.flush_count.inc(); + } RequestType::GetDeviceID => { let disk_id = disk.image_id(); if (self.data_len as usize) < disk_id.len() { diff --git a/src/devices/src/virtio/block/test_utils.rs b/src/devices/src/virtio/block/test_utils.rs index e79bf30b388..beda856caed 100644 --- a/src/devices/src/virtio/block/test_utils.rs +++ b/src/devices/src/virtio/block/test_utils.rs @@ -18,14 +18,32 @@ pub fn default_block() -> Block { default_block_with_path(f.as_path().to_str().unwrap().to_string()) } +/// Create a Block instance with flush enabled +pub fn default_block_flush() -> Block { + // Create backing file. + let f = TempFile::new().unwrap(); + f.as_file().set_len(0x1000).unwrap(); + + default_block_with_path_flush(f.as_path().to_str().unwrap().to_string()) +} + /// Create a default Block instance using file at the specified path to be used in tests. pub fn default_block_with_path(path: String) -> Block { // Rate limiting is enabled but with a high operation rate (10 million ops/s). let rate_limiter = RateLimiter::new(0, 0, 0, 100_000, 0, 10).unwrap(); let id = "test".to_string(); - // The default block device is read-write and non-root. - Block::new(id, None, path, false, false, rate_limiter).unwrap() + // The default block device is read-write, non-root with flush disabled + Block::new(id, None, path, false, false, rate_limiter, false).unwrap() +} + +/// Create a default Block instance using file at the specified path with flush enabled +pub fn default_block_with_path_flush(path: String) -> Block { + // Rate limiting is enabled but with a high operation rate (10 million ops/s). 
+ let rate_limiter = RateLimiter::new(0, 0, 0, 100_000, 0, 10).unwrap(); + + let id = "test".to_string(); + Block::new(id, None, path, false, false, rate_limiter, true).unwrap() } pub fn invoke_handler_for_queue_event(b: &mut Block) { diff --git a/src/vmm/Cargo.toml b/src/vmm/Cargo.toml index 87f356a56fe..3e325f65052 100644 --- a/src/vmm/Cargo.toml +++ b/src/vmm/Cargo.toml @@ -26,6 +26,7 @@ rate_limiter = { path = "../rate_limiter" } seccomp = { path = "../seccomp" } snapshot = { path = "../snapshot"} utils = { path = "../utils" } +virtio_gen = { path = "../virtio_gen" } [target.'cfg(target_arch = "x86_64")'.dependencies] cpuid = { path = "../cpuid" } diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index 61bf53981e3..f8683de11bf 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -811,6 +811,7 @@ pub mod tests { is_root_device: bool, partuuid: Option, is_read_only: bool, + want_flush: bool, } impl CustomBlockConfig { @@ -819,12 +820,14 @@ pub mod tests { is_root_device: bool, partuuid: Option, is_read_only: bool, + want_flush: bool, ) -> Self { CustomBlockConfig { drive_id, is_root_device, partuuid, is_read_only, + want_flush, } } } @@ -906,6 +909,7 @@ pub mod tests { partuuid: custom_block_cfg.partuuid.clone(), is_read_only: custom_block_cfg.is_read_only, rate_limiter: None, + want_flush: Some(custom_block_cfg.want_flush), }; block_dev_configs.insert(block_device_config).unwrap(); } @@ -1061,7 +1065,13 @@ pub mod tests { // Use case 1: root block device is not specified through PARTUUID. 
{ let drive_id = String::from("root"); - let block_configs = vec![CustomBlockConfig::new(drive_id.clone(), true, None, true)]; + let block_configs = vec![CustomBlockConfig::new( + drive_id.clone(), + true, + None, + true, + false, + )]; let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs); @@ -1080,6 +1090,7 @@ pub mod tests { true, Some("0eaa91a0-01".to_string()), false, + false, )]; let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); @@ -1099,6 +1110,7 @@ pub mod tests { false, Some("0eaa91a0-01".to_string()), false, + false, )]; let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); @@ -1119,9 +1131,10 @@ pub mod tests { true, Some("0eaa91a0-01".to_string()), false, + false, ), - CustomBlockConfig::new(String::from("secondary"), false, None, true), - CustomBlockConfig::new(String::from("third"), false, None, false), + CustomBlockConfig::new(String::from("secondary"), false, None, true, false), + CustomBlockConfig::new(String::from("third"), false, None, false, false), ]; let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); @@ -1151,7 +1164,13 @@ pub mod tests { // Use case 5: root block device is rw. 
{ let drive_id = String::from("root"); - let block_configs = vec![CustomBlockConfig::new(drive_id.clone(), true, None, false)]; + let block_configs = vec![CustomBlockConfig::new( + drive_id.clone(), + true, + None, + false, + false, + )]; let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs); @@ -1170,6 +1189,7 @@ pub mod tests { true, Some("0eaa91a0-01".to_string()), true, + false, )]; let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); @@ -1180,6 +1200,26 @@ pub mod tests { .get_device(DeviceType::Virtio(TYPE_BLOCK), drive_id.as_str()) .is_some()); } + + // Use case 7: root block device is rw with flush supported + { + let drive_id = String::from("root"); + let block_configs = vec![CustomBlockConfig::new( + drive_id.clone(), + true, + None, + false, + true, + )]; + let mut vmm = default_vmm(); + let mut cmdline = default_kernel_cmdline(); + insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs); + assert!(cmdline.as_str().contains("root=/dev/vda rw")); + assert!(vmm + .mmio_device_manager + .get_device(DeviceType::Virtio(TYPE_BLOCK), drive_id.as_str()) + .is_some()); + } } #[test] diff --git a/src/vmm/src/default_syscalls/filters.rs b/src/vmm/src/default_syscalls/filters.rs index 424ed768ad9..994a0f05c00 100644 --- a/src/vmm/src/default_syscalls/filters.rs +++ b/src/vmm/src/default_syscalls/filters.rs @@ -162,6 +162,7 @@ pub fn default_filter() -> Result { or![and![Cond::new(1, ArgLen::DWORD, Eq, 0u64)?],], ), allow_syscall(libc::SYS_write), + allow_syscall(libc::SYS_fsync), ] .into_iter() .collect(), diff --git a/src/vmm/src/device_manager/persist.rs b/src/vmm/src/device_manager/persist.rs index 719da1b948b..89994013dae 100644 --- a/src/vmm/src/device_manager/persist.rs +++ b/src/vmm/src/device_manager/persist.rs @@ -421,7 +421,7 @@ mod tests { // Add a block device. 
let drive_id = String::from("root"); - let block_configs = vec![CustomBlockConfig::new(drive_id, true, None, true)]; + let block_configs = vec![CustomBlockConfig::new(drive_id, true, None, true, false)]; _block_files = insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs); // Add a net device. diff --git a/src/vmm/src/persist.rs b/src/vmm/src/persist.rs index 61be19299e1..c0fc92002a2 100644 --- a/src/vmm/src/persist.rs +++ b/src/vmm/src/persist.rs @@ -307,7 +307,7 @@ mod tests { // Add a block device. let drive_id = String::from("root"); - let block_configs = vec![CustomBlockConfig::new(drive_id, true, None, true)]; + let block_configs = vec![CustomBlockConfig::new(drive_id, true, None, true, false)]; insert_block_devices(&mut vmm, &mut cmdline, event_manager, block_configs); // Add net device. diff --git a/src/vmm/src/resources.rs b/src/vmm/src/resources.rs index 59149c9e502..d4162851cd5 100644 --- a/src/vmm/src/resources.rs +++ b/src/vmm/src/resources.rs @@ -348,6 +348,7 @@ mod tests { partuuid: Some("0eaa91a0-01".to_string()), is_read_only: false, rate_limiter: Some(RateLimiterConfig::default()), + want_flush: Some(false), }, tmp_file, ) diff --git a/src/vmm/src/vmm_config/drive.rs b/src/vmm/src/vmm_config/drive.rs index 29a4fb27c19..8faeaaff025 100644 --- a/src/vmm/src/vmm_config/drive.rs +++ b/src/vmm/src/vmm_config/drive.rs @@ -81,6 +81,9 @@ pub struct BlockDeviceConfig { pub is_read_only: bool, /// Rate Limiter for I/O operations. 
pub rate_limiter: Option, + /// If set to true, the virtio layer will advertise VIRTIO_BLK_F_FLUSH and + /// synchronously flush data to backing store upon receipt of a flush request + pub want_flush: Option, } /// Wrapper for the collection that holds all the Block Devices @@ -165,6 +168,7 @@ impl BlockBuilder { if !path_on_host.exists() { return Err(DriveError::InvalidBlockDevicePath); } + let want_flush = block_device_config.want_flush.unwrap_or(false); let rate_limiter = block_device_config .rate_limiter @@ -180,6 +184,7 @@ impl BlockBuilder { block_device_config.is_read_only, block_device_config.is_root_device, rate_limiter.unwrap_or_default(), + want_flush, ) .map_err(DriveError::CreateBlockDevice) } @@ -208,6 +213,7 @@ mod tests { is_read_only: self.is_read_only, drive_id: self.drive_id.clone(), rate_limiter: None, + want_flush: self.want_flush, } } } @@ -230,6 +236,7 @@ mod tests { is_read_only: false, drive_id: dummy_id.clone(), rate_limiter: None, + want_flush: None, }; let mut block_devs = BlockBuilder::new(); @@ -259,6 +266,7 @@ mod tests { is_read_only: true, drive_id: String::from("1"), rate_limiter: None, + want_flush: None, }; let mut block_devs = BlockBuilder::new(); @@ -285,6 +293,7 @@ mod tests { is_read_only: false, drive_id: String::from("1"), rate_limiter: None, + want_flush: None, }; let dummy_file_2 = TempFile::new().unwrap(); @@ -296,6 +305,7 @@ mod tests { is_read_only: false, drive_id: String::from("2"), rate_limiter: None, + want_flush: None, }; let mut block_devs = BlockBuilder::new(); @@ -318,6 +328,7 @@ mod tests { is_read_only: false, drive_id: String::from("1"), rate_limiter: None, + want_flush: None, }; let dummy_file_2 = TempFile::new().unwrap(); @@ -329,6 +340,7 @@ mod tests { is_read_only: false, drive_id: String::from("2"), rate_limiter: None, + want_flush: None, }; let dummy_file_3 = TempFile::new().unwrap(); @@ -340,6 +352,7 @@ mod tests { is_read_only: false, drive_id: String::from("3"), rate_limiter: None, + want_flush: 
None, }; let mut block_devs = BlockBuilder::new(); @@ -376,6 +389,7 @@ mod tests { is_read_only: false, drive_id: String::from("1"), rate_limiter: None, + want_flush: None, }; let dummy_file_2 = TempFile::new().unwrap(); @@ -387,6 +401,7 @@ mod tests { is_read_only: false, drive_id: String::from("2"), rate_limiter: None, + want_flush: None, }; let dummy_file_3 = TempFile::new().unwrap(); @@ -398,6 +413,7 @@ mod tests { is_read_only: false, drive_id: String::from("3"), rate_limiter: None, + want_flush: None, }; let mut block_devs = BlockBuilder::new(); @@ -435,6 +451,7 @@ mod tests { is_read_only: false, drive_id: String::from("1"), rate_limiter: None, + want_flush: None, }; let dummy_file_2 = TempFile::new().unwrap(); @@ -446,6 +463,7 @@ mod tests { is_read_only: false, drive_id: String::from("2"), rate_limiter: None, + want_flush: None, }; let mut block_devs = BlockBuilder::new(); @@ -504,6 +522,7 @@ mod tests { is_read_only: false, drive_id: String::from("1"), rate_limiter: None, + want_flush: None, }; // Switch roots and add a PARTUUID for the new one. 
let mut root_block_device_old = root_block_device; @@ -515,6 +534,7 @@ mod tests { is_read_only: false, drive_id: String::from("2"), rate_limiter: None, + want_flush: None, }; assert!(block_devs.insert(root_block_device_old).is_ok()); let root_block_id = root_block_device_new.drive_id.clone(); @@ -537,6 +557,7 @@ mod tests { partuuid: Some("0eaa91a0-01".to_string()), is_read_only: true, rate_limiter: None, + want_flush: None, }; assert_eq!( @@ -549,4 +570,79 @@ mod tests { ); assert_eq!(block_config.is_read_only, expected_is_read_only); } + + // Test adding devices with explicit flush parameters + #[test] + fn test_block_dev_flush() { + use devices::virtio::device::VirtioDevice; + use virtio_gen::virtio_blk::VIRTIO_BLK_F_FLUSH; + + let dummy_file_1 = TempFile::new().unwrap(); + let dummy_path_1 = dummy_file_1.as_path().to_str().unwrap().to_string(); + let root_block_device = BlockDeviceConfig { + path_on_host: dummy_path_1, + is_root_device: true, + partuuid: None, + is_read_only: false, + drive_id: String::from("1"), + rate_limiter: None, + want_flush: None, + }; + + let dummy_file_2 = TempFile::new().unwrap(); + let dummy_path_2 = dummy_file_2.as_path().to_str().unwrap().to_string(); + let dummy_block_dev_2 = BlockDeviceConfig { + path_on_host: dummy_path_2, + is_root_device: false, + partuuid: None, + is_read_only: false, + drive_id: String::from("2"), + rate_limiter: None, + want_flush: Some(false), + }; + + let dummy_file_3 = TempFile::new().unwrap(); + let dummy_path_3 = dummy_file_3.as_path().to_str().unwrap().to_string(); + let dummy_block_dev_3 = BlockDeviceConfig { + path_on_host: dummy_path_3, + is_root_device: false, + partuuid: None, + is_read_only: false, + drive_id: String::from("3"), + rate_limiter: None, + want_flush: Some(true), + }; + + let mut block_devs = BlockBuilder::new(); + assert!(block_devs.insert(dummy_block_dev_2.clone()).is_ok()); + assert!(block_devs.insert(dummy_block_dev_3.clone()).is_ok()); + 
assert!(block_devs.insert(root_block_device.clone()).is_ok()); + + assert_eq!(block_devs.list.len(), 3); + assert_eq!( + block_devs.list[0].lock().unwrap().id(), + &root_block_device.drive_id + ); + assert_eq!( + block_devs.list[1].lock().unwrap().id(), + &dummy_block_dev_2.drive_id + ); + assert_eq!( + block_devs.list[2].lock().unwrap().id(), + &dummy_block_dev_3.drive_id + ); + + // Make sure the correct bit is enabled + let feature = block_devs.list[0].lock().unwrap().avail_features(); + assert_eq!((feature & (1u64 << VIRTIO_BLK_F_FLUSH)), 0); + + let feature = block_devs.list[1].lock().unwrap().avail_features(); + assert_eq!((feature & (1u64 << VIRTIO_BLK_F_FLUSH)), 0); + + let feature = block_devs.list[2].lock().unwrap().avail_features(); + assert_eq!( + (feature & (1u64 << VIRTIO_BLK_F_FLUSH)), + (1u64 << VIRTIO_BLK_F_FLUSH) + ); + } } diff --git a/tests/framework/microvm.py b/tests/framework/microvm.py index 22871a11c38..4e058a3d9e4 100644 --- a/tests/framework/microvm.py +++ b/tests/framework/microvm.py @@ -558,6 +558,7 @@ def add_drive( root_device=False, is_read_only=False, partuuid=None, + want_flush=False, ): """Add a block device.""" response = self.drive.put( @@ -565,7 +566,8 @@ def add_drive( path_on_host=self.create_jailed_resource(file_path), is_root_device=root_device, is_read_only=is_read_only, - partuuid=partuuid + partuuid=partuuid, + want_flush=want_flush ) assert self.api_session.is_status_no_content(response.status_code) diff --git a/tests/framework/resources.py b/tests/framework/resources.py index f05a19674f0..114d84e92f7 100644 --- a/tests/framework/resources.py +++ b/tests/framework/resources.py @@ -142,7 +142,8 @@ def create_json( is_root_device=None, partuuid=None, is_read_only=None, - rate_limiter=None): + rate_limiter=None, + want_flush=None): """Compose the json associated to this type of API request.""" datax = {} @@ -164,6 +165,9 @@ def create_json( if rate_limiter is not None: datax['rate_limiter'] = rate_limiter + if want_flush 
is not None: + datax['want_flush'] = want_flush + return datax diff --git a/tests/integration_tests/build/test_binary_size.py b/tests/integration_tests/build/test_binary_size.py index 2d249995d66..a8c64fd7ed9 100644 --- a/tests/integration_tests/build/test_binary_size.py +++ b/tests/integration_tests/build/test_binary_size.py @@ -19,9 +19,9 @@ "JAILER_BINARY_SIZE_LIMIT": 1511488, }, "aarch64": { - "FC_BINARY_SIZE_TARGET": 1531384, + "FC_BINARY_SIZE_TARGET": 1609728, "JAILER_BINARY_SIZE_TARGET": 1338312, - "FC_BINARY_SIZE_LIMIT": 1607953, + "FC_BINARY_SIZE_LIMIT": 1686528, "JAILER_BINARY_SIZE_LIMIT": 1511488, } } diff --git a/tests/integration_tests/functional/test_drives.py b/tests/integration_tests/functional/test_drives.py index 6b86510732d..553e801dc6f 100644 --- a/tests/integration_tests/functional/test_drives.py +++ b/tests/integration_tests/functional/test_drives.py @@ -10,6 +10,7 @@ import host_tools.drive as drive_tools import host_tools.network as net_tools # pylint: disable=import-error +import host_tools.logging as log_tools # metrics def test_rescan_file(test_microvm_with_ssh, network_config): @@ -329,6 +330,108 @@ def test_patch_drive(test_microvm_with_ssh, network_config): assert stdout.readline().strip() == size_bytes_str +def test_rootfs_rw_noflush(test_microvm_with_ssh, network_config): + """Verify default rootfs does not invoke flush.""" + test_microvm = test_microvm_with_ssh + test_microvm.spawn() + + # Set up the microVM with 1 vCPUs, 256 MiB of RAM, no network ifaces and + # a root file system with the rw permission. The network interfaces is + # added after we get a unique MAC and IP. + test_microvm.basic_config( + vcpu_count=1, + add_root_device=False + ) + + _tap, _, _ = test_microvm.ssh_network_config(network_config, '1') + + # Add the root block device + test_microvm.add_drive( + 'rootfs', + test_microvm.rootfs_file, + root_device=True, + ) + + # Configure metrics, to get later the `flush_count`. 
+ metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo') + metrics_fifo = log_tools.Fifo(metrics_fifo_path) + response = test_microvm.metrics.put( + metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path) + ) + assert test_microvm.api_session.is_status_no_content(response.status_code) + + test_microvm.start() + + # From spec: pub const VIRTIO_F_VERSION_1: u32 = 32; + lo32 = "00000000000000000000000000000000" + hi32 = "10000000000000000000000000000000" + default_features = lo32 + hi32 + + ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config) + cmd = "cat /sys/block/vda/device/features" + _, stdout, stderr = ssh_connection.execute_command(cmd) + assert stderr.read() == '' + assert stdout.readline().strip() == default_features + + # Verify no flush commands were generated during boot. By + # default, on a RW device with flush enabled, there will be + # +- six virtio flush commands + fc_metrics = test_microvm.flush_metrics(metrics_fifo) + assert fc_metrics['block']['flush_count'] == 0 + + +def test_rootfs_rw_flush(test_microvm_with_ssh, network_config): + """Verify rootfs with flush does invoke flush.""" + test_microvm = test_microvm_with_ssh + test_microvm.spawn() + + # Set up the microVM with 1 vCPUs, 256 MiB of RAM, no network ifaces and + # a root file system with the rw permission. The network interfaces is + # added after we get a unique MAC and IP. + test_microvm.basic_config( + vcpu_count=1, + add_root_device=False + ) + + _tap, _, _ = test_microvm.ssh_network_config(network_config, '1') + + # Add the root block device + test_microvm.add_drive( + 'rootfs', + test_microvm.rootfs_file, + root_device=True, + want_flush=True, + ) + + # Configure metrics, to get later the `flush_count`. 
+ metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo') + metrics_fifo = log_tools.Fifo(metrics_fifo_path) + response = test_microvm.metrics.put( + metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path) + ) + assert test_microvm.api_session.is_status_no_content(response.status_code) + + test_microvm.start() + + # From spec: pub const VIRTIO_F_VERSION_1: u32 = 32; + # pub const VIRTIO_BLK_F_FLUSH: u32 = 9; + lo32 = "00000000010000000000000000000000" + hi32 = "10000000000000000000000000000000" + default_features = lo32 + hi32 + + ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config) + cmd = "cat /sys/block/vda/device/features" + _, stdout, stderr = ssh_connection.execute_command(cmd) + assert stderr.read() == '' + assert stdout.readline().strip() == default_features + + # Verify flush commands were generated during boot. On a + # RW device with flush enabled, there will be + # +- six virtio flush commands + fc_metrics = test_microvm.flush_metrics(metrics_fifo) + assert fc_metrics['block']['flush_count'] > 0 + + def _check_block_size(ssh_connection, dev_path, size): _, stdout, stderr = ssh_connection.execute_command( 'blockdev --getsize64 {}'.format(dev_path)