Skip to content

Commit 74995c4

Browse files
committed
reduce number of allocation lookups during copy
1 parent 46c2286 commit 74995c4

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

compiler/rustc_mir/src/interpret/memory.rs

+6-6
Original file line numberDiff line numberDiff line change
@@ -934,6 +934,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
934934
Some(a) => a,
935935
None => return Ok(&[]), // zero-sized access
936936
};
937+
// Side-step AllocRef and directly access the underlying bytes more efficiently.
938+
// (We are staying inside the bounds here so all is good.)
937939
Ok(alloc_ref
938940
.alloc
939941
.get_bytes(&alloc_ref.tcx, alloc_ref.range)
@@ -1006,7 +1008,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
10061008
let src = self.check_ptr_access(src, size, src_align)?;
10071009
let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
10081010

1009-
// FIXME: avoid looking up allocations more often than necessary.
1011+
// FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
1012+
// and once below to get the underlying `&[mut] Allocation`.
10101013

10111014
// Source alloc preparations and access hooks.
10121015
let src = match src {
@@ -1033,6 +1036,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
10331036
dest.offset,
10341037
num_copies,
10351038
);
1039+
// Prepare a copy of the initialization mask.
1040+
let compressed = src_alloc.compress_uninit_range(src, size);
10361041
// This checks relocation edges on the src.
10371042
let src_bytes = src_alloc
10381043
.get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
@@ -1046,17 +1051,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
10461051
.get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies))
10471052
.as_mut_ptr();
10481053

1049-
// Prepare a copy of the initialization mask.
1050-
let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);
1051-
10521054
if compressed.no_bytes_init() {
10531055
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
10541056
// is marked as uninitialized but we otherwise omit changing the byte representation which may
10551057
// be arbitrary for uninitialized bytes.
10561058
// This also avoids writing to the target bytes so that the backing allocation is never
10571059
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
10581060
// operating systems this can avoid physically allocating the page.
1059-
let dest_alloc = self.get_raw_mut(dest.alloc_id)?.0;
10601061
dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication
10611062
dest_alloc.mark_relocation_range(relocations);
10621063
return Ok(());
@@ -1096,7 +1097,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
10961097
}
10971098
}
10981099

1099-
let dest_alloc = self.get_raw_mut(dest.alloc_id)?.0;
11001100
// now fill in all the "init" data
11011101
dest_alloc.mark_compressed_init_range(&compressed, dest, size, num_copies);
11021102
// copy the relocations to the destination

0 commit comments

Comments
 (0)