@@ -3131,4 +3131,207 @@ mod tests {
             e => panic!("Unexpected exit: {:?}", e),
         }
     }
+
+    #[test]
+    #[cfg(target_arch = "x86_64")]
+    fn test_coalesced_pio() {
+        use crate::IoEventAddress;
+        use std::io::Write;
+
+        const PORT: u64 = 0x2c;
+        const DATA: u64 = 0x39;
+        const SIZE: u32 = 1;
+
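+        // The guest writes AL to port 0x2c and halts, twice: the first write
+        // should be coalesced into the ring buffer, the second (after the
+        // zone is unregistered) should surface as a regular I/O exit.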
+        #[rustfmt::skip]
+        let code = [
+            0xe6, 0x2c, // out 0x2c, al
+            0xf4,       // hlt
+            0xe6, 0x2c, // out 0x2c, al
+            0xf4,       // hlt
+        ];
+
+        let kvm = Kvm::new().unwrap();
+        let vm = kvm.create_vm().unwrap();
+        assert!(vm.check_extension(Cap::CoalescedPio));
+
+        // Prepare guest memory
+        let mem_size = 0x4000;
+        let load_addr = mmap_anonymous(mem_size);
+        let guest_addr: u64 = 0x1000;
+        let slot = 0;
+        let mem_region = kvm_userspace_memory_region {
+            slot,
+            guest_phys_addr: guest_addr,
+            memory_size: mem_size as u64,
+            userspace_addr: load_addr as u64,
+            flags: 0,
+        };
+
+        unsafe {
+            vm.set_user_memory_region(mem_region).unwrap();
+
+            // Get a mutable slice of `mem_size` from `load_addr`.
+            // This is safe because we mapped it before.
+            let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size);
+            slice.write_all(&code).unwrap();
+        }
+
+        let addr = IoEventAddress::Pio(PORT);
+        vm.register_coalesced_mmio(addr, SIZE).unwrap();
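+        // `register_coalesced_mmio` covers both zone types; the
+        // `IoEventAddress` variant selects PIO vs. MMIO.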
+
+        let mut vcpu = vm.create_vcpu(0).unwrap();
+
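+        // The ring lives in a page KVM shares with userspace via the vCPU
+        // mmap region; coalesced writes are appended there instead of
+        // triggering exits.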
+        // Map the MMIO ring
+        vcpu.map_coalesced_mmio_ring().unwrap();
+
+        // Set regs
+        let mut regs = vcpu.get_regs().unwrap();
+        regs.rip = guest_addr;
+        regs.rax = DATA;
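+        // Bit 1 of RFLAGS is reserved and must always be set.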
+        regs.rflags = 2;
+        vcpu.set_regs(&regs).unwrap();
+
+        // Set sregs
+        let mut sregs = vcpu.get_sregs().unwrap();
+        sregs.cs.base = 0;
+        sregs.cs.selector = 0;
+        vcpu.set_sregs(&sregs).unwrap();
+
+        // Run and check that the exit was caused by the hlt and not the port
+        // I/O
+        let exit = vcpu.run().unwrap();
+        assert!(matches!(exit, VcpuExit::Hlt));
+
+        // Check that the ring buffer entry is what we expect
+        let entry = vcpu.coalesced_mmio_read().unwrap().unwrap();
+        assert_eq!(entry.phys_addr, PORT);
+        assert_eq!(entry.len, 1);
+        assert_eq!(entry.data[0] as u64, DATA);
+        // SAFETY: this field is a u32 in all variants of the union,
+        // so access is always safe.
+        let pio = unsafe { entry.__bindgen_anon_1.pio };
+        assert_eq!(pio, 1);
+
+        // The ring buffer should be empty now
+        assert!(vcpu.coalesced_mmio_read().unwrap().is_none());
+
+        // Unregister and check that the next PIO write triggers an exit
+        vm.unregister_coalesced_mmio(addr, SIZE).unwrap();
+        let exit = vcpu.run().unwrap();
+        let VcpuExit::IoOut(port, data) = exit else {
+            panic!("Unexpected VM exit: {:?}", exit);
+        };
+        assert_eq!(port, PORT as u16);
+        assert_eq!(data, (DATA as u8).to_le_bytes());
+    }
+
+    #[test]
+    #[cfg(target_arch = "x86_64")]
+    fn test_coalesced_mmio() {
+        use crate::IoEventAddress;
+        use std::io::Write;
+
+        const ADDR: u64 = 0x124;
+        const DATA: u64 = 0x39;
+        const SIZE: u32 = 2;
+
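+        // The guest stores the low 16 bits of RAX at address 0x124 and
+        // halts, twice: the first store should be coalesced, the second
+        // (after the zone is unregistered) should exit with MmioWrite.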
+        #[rustfmt::skip]
+        let code = [
+            0x66, 0x31, 0xFF,       // xor di,di
+            0x66, 0xBF, 0x24, 0x01, // mov di, 0x124
+            0x67, 0x66, 0x89, 0x05, // mov WORD PTR [di], ax
+            0xF4,                   // hlt
+            0x66, 0x31, 0xFF,       // xor di,di
+            0x66, 0xBF, 0x24, 0x01, // mov di, 0x124
+            0x67, 0x66, 0x89, 0x05, // mov WORD PTR [di], ax
+            0xF4,                   // hlt
+        ];
+
+        let kvm = Kvm::new().unwrap();
+        let vm = kvm.create_vm().unwrap();
+        assert!(vm.check_extension(Cap::CoalescedMmio));
+
+        // Prepare guest memory
+        let mem_size = 0x4000;
+        let load_addr = mmap_anonymous(mem_size);
+        let guest_addr: u64 = 0x1000;
+        let slot: u32 = 0;
+        let mem_region = kvm_userspace_memory_region {
+            slot,
+            guest_phys_addr: guest_addr,
+            memory_size: mem_size as u64,
+            userspace_addr: load_addr as u64,
+            flags: 0,
+        };
+
+        unsafe {
+            vm.set_user_memory_region(mem_region).unwrap();
+
+            // Get a mutable slice of `mem_size` from `load_addr`.
+            // This is safe because we mapped it before.
+            let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size);
+            slice.write_all(&code).unwrap();
+        }
+
+        let addr = IoEventAddress::Mmio(ADDR);
+        vm.register_coalesced_mmio(addr, SIZE).unwrap();
+
+        let mut vcpu = vm.create_vcpu(0).unwrap();
+
+        // Map the MMIO ring
+        vcpu.map_coalesced_mmio_ring().unwrap();
+
+        // Set regs
+        let mut regs = vcpu.get_regs().unwrap();
+        regs.rip = guest_addr;
+        regs.rax = DATA;
+        regs.rdx = ADDR;
+        regs.rflags = 2;
+        vcpu.set_regs(&regs).unwrap();
+
+        // Set sregs
+        let mut sregs = vcpu.get_sregs().unwrap();
+        sregs.cs.base = 0;
+        sregs.cs.selector = 0;
+        vcpu.set_sregs(&sregs).unwrap();
+
+        // Run and check that the exit was caused by the hlt and not the MMIO
+        // access
+        let exit = vcpu.run().unwrap();
+        assert!(matches!(exit, VcpuExit::Hlt));
+
+        // Check that the ring buffer entry is what we expect
+        let entry = vcpu.coalesced_mmio_read().unwrap().unwrap();
+        assert_eq!(entry.phys_addr, ADDR);
+        assert_eq!(entry.len, SIZE);
+        assert_eq!(entry.data[0] as u64, DATA);
+        // SAFETY: this field is a u32 in all variants of the union,
+        // so access is always safe.
+        let pio = unsafe { entry.__bindgen_anon_1.pio };
+        assert_eq!(pio, 0);
+
+        // The ring buffer should be empty now
+        assert!(vcpu.coalesced_mmio_read().unwrap().is_none());
+
+        // Unregister and check that the next MMIO write triggers an exit
+        vm.unregister_coalesced_mmio(addr, SIZE).unwrap();
+        let exit = vcpu.run().unwrap();
+        let VcpuExit::MmioWrite(addr, data) = exit else {
+            panic!("Unexpected VM exit: {:?}", exit);
+        };
+        assert_eq!(addr, ADDR);
+        assert_eq!(data, (DATA as u16).to_le_bytes());
+    }
 }