@@ -1,3 +1,5 @@
+// Copyright © 2020, Oracle and/or its affiliates.
+//
 // Copyright (c) 2019 Intel Corporation. All rights reserved.
 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
@@ -36,6 +38,13 @@ use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestUsize};
 #[allow(missing_docs)]
 #[cfg_attr(feature = "cargo-clippy", allow(clippy::all))]
 pub mod bootparam;
+
+#[cfg(feature = "elf")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+#[allow(missing_docs)]
+#[cfg_attr(feature = "cargo-clippy", allow(clippy::all))]
+pub mod start_info;
+
 #[allow(dead_code)]
 #[allow(non_camel_case_types)]
 #[allow(non_snake_case)]
@@ -93,6 +102,12 @@ pub enum Error {
     SeekBzImageHeader,
     /// Unable to seek to bzImage compressed kernel.
     SeekBzImageCompressedKernel,
+    /// Unable to seek to note header.
+    SeekNoteHeader,
+    /// Unable to read note header.
+    ReadNoteHeader,
+    /// Invalid PVH note.
+    InvalidPvhNote,
 }
 
 /// A specialized `Result` type for the kernel loader.
@@ -125,6 +140,9 @@ impl error::Error for Error {
             Error::SeekBzImageEnd => "Unable to seek bzImage end",
             Error::SeekBzImageHeader => "Unable to seek bzImage header",
             Error::SeekBzImageCompressedKernel => "Unable to seek bzImage compressed kernel",
+            Error::SeekNoteHeader => "Unable to seek to note header",
+            Error::ReadNoteHeader => "Unable to read note header",
+            Error::InvalidPvhNote => "Invalid PVH note header",
         }
     }
 }
@@ -150,6 +168,10 @@ pub struct KernelLoaderResult {
     /// This field is only for bzImage following https://www.kernel.org/doc/Documentation/x86/boot.txt
     /// VMM should make use of it to fill zero page for bzImage direct boot.
     pub setup_header: Option<bootparam::setup_header>,
+    /// This field optionally holds the address of a PVH entry point, indicating that
+    /// the kernel supports the PVH boot protocol as described in:
+    /// https://xenbits.xen.org/docs/unstable/misc/pvh.html
+    pub pvh_entry_addr: Option<GuestAddress>,
 }
 
 /// A kernel image loading support must implement the KernelLoader trait.
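A brief aside on how the new field is meant to be consumed: the loader only records the PVH entry point, and the choice of boot protocol is left to the VMM. The sketch below is illustrative only and is not part of this change; `select_entry` is a hypothetical helper, and the fallback shown (`kernel_load`) merely stands in for whatever boot path the VMM already implements.

```rust
// Hypothetical VMM-side helper, not part of this crate: decide where to start
// the guest after `load()` has returned a `KernelLoaderResult`.
fn select_entry(result: &KernelLoaderResult) -> GuestAddress {
    match result.pvh_entry_addr {
        // A XEN_ELFNOTE_PHYS32_ENTRY note was found: start the vCPU at the
        // 32-bit PVH entry point, with registers set up per the PVH ABI.
        Some(pvh_entry) => pvh_entry,
        // No PVH note: fall back to the boot path the VMM already uses
        // (shown here, for illustration only, as the kernel load address).
        None => result.kernel_load,
    }
}
```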
@@ -247,6 +269,10 @@ impl KernelLoader for Elf {
         // Read in each section pointed to by the program headers.
         for phdr in &phdrs {
             if phdr.p_type != elf::PT_LOAD || phdr.p_filesz == 0 {
+                if phdr.p_type == elf::PT_NOTE {
+                    // This segment describes a Note, check if PVH entry point is encoded.
+                    loader_result.pvh_entry_addr = parse_elf_note(phdr, kernel_image)?;
+                }
                 continue;
             }
 
@@ -280,6 +306,79 @@ impl KernelLoader for Elf {
     }
 }
 
+#[cfg(feature = "elf")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn parse_elf_note<F>(phdr: &elf::Elf64_Phdr, kernel_image: &mut F) -> Result<Option<GuestAddress>>
+where
+    F: Read + Seek,
+{
+    // Type of note header that encodes a 32-bit entry point address
+    // to boot a guest kernel using the PVH boot protocol.
+    const XEN_ELFNOTE_PHYS32_ENTRY: u32 = 18;
+
+    let n_align = phdr.p_align;
+
+    // Seek to the beginning of the note segment
+    kernel_image
+        .seek(SeekFrom::Start(phdr.p_offset))
+        .map_err(|_| Error::SeekNoteHeader)?;
+
+    // Now that the segment has been found, we must locate an ELF note with the
+    // correct type that encodes the PVH entry point if there is one.
+    let mut nhdr: elf::Elf64_Nhdr = Default::default();
+    let mut read_size: usize = 0;
+
+    while read_size < phdr.p_filesz as usize {
+        unsafe {
+            // read_struct is safe when reading a POD struct.
+            // It can be used and dropped without issue.
+            struct_util::read_struct(kernel_image, &mut nhdr).map_err(|_| Error::ReadNoteHeader)?;
+        }
+        // If the note header found is not the desired one, keep reading until
+        // the end of the segment
+        if nhdr.n_type == XEN_ELFNOTE_PHYS32_ENTRY {
+            break;
+        }
+        // Skip the note header plus the size of its fields (with alignment)
+        read_size += mem::size_of::<elf::Elf64_Nhdr>()
+            + align_up(u64::from(nhdr.n_namesz), n_align)
+            + align_up(u64::from(nhdr.n_descsz), n_align);
+
+        kernel_image
+            .seek(SeekFrom::Start(phdr.p_offset + read_size as u64))
+            .map_err(|_| Error::SeekNoteHeader)?;
+    }
+
+    if read_size >= phdr.p_filesz as usize {
+        return Ok(None); // PVH ELF note not found, nothing else to do.
+    }
+    // Otherwise the correct note type was found.
+    // The note header struct has already been read, so we can seek from the
+    // current position and just skip the name field contents.
+    kernel_image
+        .seek(SeekFrom::Current(
+            align_up(u64::from(nhdr.n_namesz), n_align) as i64,
+        ))
+        .map_err(|_| Error::SeekNoteHeader)?;
+
+    // The PVH entry point is a 32-bit address, so the descriptor field
+    // must be capable of storing all such addresses.
+    if (nhdr.n_descsz as usize) < mem::size_of::<u32>() {
+        return Err(Error::InvalidPvhNote);
+    }
+
+    let mut pvh_addr_bytes = [0; mem::size_of::<u32>()];
+
+    // Read 32-bit address stored in the PVH note descriptor field.
+    kernel_image
+        .read_exact(&mut pvh_addr_bytes)
+        .map_err(|_| Error::ReadNoteHeader)?;
+
+    Ok(Some(GuestAddress(
+        u32::from_le_bytes(pvh_addr_bytes).into(),
+    )))
+}
+
 #[cfg(feature = "bzimage")]
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
 /// Big zImage (bzImage) kernel image support.
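For orientation, the segment that `parse_elf_note` walks is a sequence of ELF notes, each an `Elf64_Nhdr` (three 32-bit words: `n_namesz`, `n_descsz`, `n_type`) followed by the name bytes and then the descriptor bytes, each padded to the segment alignment. The sketch below hand-builds such a buffer; the owner name "Xen", the 4-byte alignment, the little-endian layout, and the helper name `build_pvh_note` are illustrative assumptions, not taken from this change.

```rust
// Illustrative only: hand-build a note segment containing a single PVH note,
// the shape parse_elf_note() expects to find under a PT_NOTE program header.
fn build_pvh_note(entry: u32) -> Vec<u8> {
    const XEN_ELFNOTE_PHYS32_ENTRY: u32 = 18;
    let name = b"Xen\0"; // 4 bytes including the NUL, so already 4-byte aligned
    let mut buf = Vec::new();
    buf.extend_from_slice(&(name.len() as u32).to_le_bytes()); // n_namesz
    buf.extend_from_slice(&4u32.to_le_bytes()); // n_descsz: a 32-bit entry point
    buf.extend_from_slice(&XEN_ELFNOTE_PHYS32_ENTRY.to_le_bytes()); // n_type
    buf.extend_from_slice(name); // name, padded to the segment alignment
    buf.extend_from_slice(&entry.to_le_bytes()); // desc: the PVH entry address
    buf
}
```

Scanning a buffer like this, the loop above matches `n_type` on the first iteration, skips the padded name, and reads the 4-byte descriptor as the guest entry address.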
@@ -409,6 +508,23 @@ pub fn load_cmdline<M: GuestMemory>(
     Ok(())
 }
 
+/// Align address upwards. Taken from x86_64 crate:
+/// https://docs.rs/x86_64/latest/x86_64/fn.align_up.html
+///
+/// Returns the smallest x with alignment `align` so that x >= addr. The alignment must be
+/// a power of 2.
+#[cfg(feature = "elf")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn align_up(addr: u64, align: u64) -> usize {
+    assert!(align.is_power_of_two(), "`align` must be a power of two");
+    let align_mask = align - 1;
+    if addr & align_mask == 0 {
+        addr as usize // already aligned
+    } else {
+        ((addr | align_mask) + 1) as usize
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
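Since `align_up` drives the note-skipping arithmetic above, a small check could sit alongside the existing tests; a sketch follows (the `cfg` gates carried by the function itself are omitted here for brevity, and the expected values are worked out from the mask arithmetic):

```rust
#[test]
fn test_align_up() {
    assert_eq!(align_up(0, 4), 0); // already aligned
    assert_eq!(align_up(4, 4), 4); // already aligned
    assert_eq!(align_up(5, 4), 8); // (5 | 3) + 1
    assert_eq!(align_up(7, 4), 8); // (7 | 3) + 1
    assert_eq!(align_up(9, 8), 16); // rounds up to the next multiple of 8
}
```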