@@ -934,6 +934,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             Some(a) => a,
             None => return Ok(&[]), // zero-sized access
         };
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
         Ok(alloc_ref
             .alloc
             .get_bytes(&alloc_ref.tcx, alloc_ref.range)
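Note on the hunk above: the zero-sized access returns an empty slice before any allocation lookup, so the direct byte access below can assume a real, in-bounds range. A toy sketch of that pattern, with a hypothetical `Allocation`/`get_bytes` rather than the rustc types:

```rust
// Minimal sketch (invented types, not the rustc API): handle a zero-sized
// access up front so the direct byte access can assume an in-bounds range.
struct Allocation {
    bytes: Vec<u8>,
}

impl Allocation {
    /// Returns the bytes in `offset..offset + len`, or an empty slice for a
    /// zero-sized access. Hypothetical stand-in for `get_bytes`.
    fn get_bytes(&self, offset: usize, len: usize) -> Result<&[u8], String> {
        if len == 0 {
            // Zero-sized accesses are always fine and need no bounds lookup.
            return Ok(&[]);
        }
        self.bytes
            .get(offset..offset + len)
            .ok_or_else(|| format!("out-of-bounds read at {offset}+{len}"))
    }
}

fn main() {
    let alloc = Allocation { bytes: vec![1, 2, 3, 4] };
    assert_eq!(alloc.get_bytes(1, 2).unwrap(), &[2, 3]);
    assert!(alloc.get_bytes(0, 0).unwrap().is_empty());
}
```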
@@ -1006,7 +1008,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let src = self.check_ptr_access(src, size, src_align)?;
         let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication

-        // FIXME: avoid looking up allocations more often than necessary.
+        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
+        // and once below to get the underlying `&[mut] Allocation`.

         // Source alloc preparations and access hooks.
         let src = match src {
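The `// \`Size\` multiplication` marker flags arithmetic that must not silently wrap; rustc's `Size` type checks its arithmetic internally. A minimal sketch of a checked-multiplication size type, assuming that semantics (the `Size` below is invented, not the compiler's):

```rust
use std::ops::Mul;

// Sketch: a byte-size wrapper whose multiplication panics on overflow
// instead of wrapping, mirroring why the diff marks `size * num_copies`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Size {
    bytes: u64,
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, n: u64) -> Size {
        Size { bytes: self.bytes.checked_mul(n).expect("size arithmetic overflow") }
    }
}

fn main() {
    let elem = Size { bytes: 8 };
    assert_eq!(elem * 3, Size { bytes: 24 });
    // `Size { bytes: u64::MAX } * 2` would panic here rather than wrap.
}
```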
@@ -1033,6 +1036,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             dest.offset,
             num_copies,
         );
+        // Prepare a copy of the initialization mask.
+        let compressed = src_alloc.compress_uninit_range(src, size);
         // This checks relocation edges on the src.
         let src_bytes = src_alloc
             .get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
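With this move, `compress_uninit_range` runs on the already-borrowed `src_alloc` instead of re-looking up the allocation. As a rough illustration of what "compressing" an initialization mask means, here is a run-length encoding sketch with invented types (rustc's real compressed mask works on a bit-level mask, not a `Vec<bool>`):

```rust
// Sketch: run-length compress a per-byte init mask into an initial state
// plus alternating run lengths, the rough idea behind compress_uninit_range.
struct CompressedInitMask {
    initial: bool,         // init state of the first byte
    run_lengths: Vec<u64>, // lengths of alternating init/uninit runs
}

fn compress(mask: &[bool]) -> CompressedInitMask {
    let initial = mask.first().copied().unwrap_or(false);
    let mut run_lengths = Vec::new();
    let mut current = initial;
    let mut len = 0u64;
    for &bit in mask {
        if bit == current {
            len += 1;
        } else {
            run_lengths.push(len);
            current = bit;
            len = 1;
        }
    }
    run_lengths.push(len);
    CompressedInitMask { initial, run_lengths }
}

fn main() {
    let c = compress(&[true, true, false, false, false, true]);
    assert!(c.initial);
    assert_eq!(c.run_lengths, vec![2, 3, 1]);
}
```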
@@ -1046,17 +1051,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             .get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies))
             .as_mut_ptr();

-        // Prepare a copy of the initialization mask.
-        let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);
-
         if compressed.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
             // is marked as uninitialized but we otherwise omit changing the byte representation, which may
             // be arbitrary for uninitialized bytes.
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating systems this can avoid physically allocating the page.
-            let dest_alloc = self.get_raw_mut(dest.alloc_id)?.0;
             dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication
             dest_alloc.mark_relocation_range(relocations);
             return Ok(());
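The fast path above only flips the destination's init mask and never writes the destination bytes, which is what lets the OS keep untouched pages unallocated. A hand-wavy sketch of that idea with invented types:

```rust
// Sketch (invented types): when the whole source range is uninitialized,
// only mark the destination uninit; never touch the destination bytes.
struct Alloc {
    bytes: Vec<u8>,
    init: Vec<bool>, // per-byte init mask (the real code uses a bitset)
}

fn copy_uninit_fast_path(dest: &mut Alloc, offset: usize, len: usize) {
    // Mark the target range uninitialized without writing `dest.bytes`.
    for bit in &mut dest.init[offset..offset + len] {
        *bit = false;
    }
}

fn main() {
    let mut dest = Alloc { bytes: vec![0; 8], init: vec![true; 8] };
    copy_uninit_fast_path(&mut dest, 2, 4);
    assert_eq!(dest.init, vec![true, true, false, false, false, false, true, true]);
    assert_eq!(dest.bytes, vec![0; 8]); // byte contents left untouched
}
```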
@@ -1096,7 +1097,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             }
         }

-        let dest_alloc = self.get_raw_mut(dest.alloc_id)?.0;
         // now fill in all the "init" data
         dest_alloc.mark_compressed_init_range(&compressed, dest, size, num_copies);
         // copy the relocations to the destination
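Conceptually, `mark_compressed_init_range` decompresses the saved mask into the destination once per copy. A speculative sketch of that idea (hypothetical signature, bools instead of a bit-level mask):

```rust
// Sketch: stamp a run-length-compressed init mask onto the destination
// `num_copies` times, one `len`-sized stride per copy.
fn apply_compressed(
    dest_init: &mut [bool],
    dest_offset: usize,
    len: usize,
    num_copies: usize,
    initial: bool,
    run_lengths: &[u64],
) {
    for copy in 0..num_copies {
        let mut pos = dest_offset + copy * len;
        let mut state = initial;
        for &run in run_lengths {
            for bit in &mut dest_init[pos..pos + run as usize] {
                *bit = state;
            }
            pos += run as usize;
            state = !state; // runs alternate init/uninit
        }
    }
}

fn main() {
    let mut init = vec![false; 8];
    // a 2-init / 2-uninit pattern, stamped twice into an 8-byte range
    apply_compressed(&mut init, 0, 4, 2, true, &[2, 2]);
    assert_eq!(init, vec![true, true, false, false, true, true, false, false]);
}
```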