Avoid copying some undef memory in MIR
During MIR interpretation it may happen that a place containing uninitialized bytes is copied. Previously, this read the current representation of those bytes and wrote it to the destination, even though that representation must, by definition, not matter to the execution. This commit elides the representation copy when none of the bytes in such a copy are defined, saving some CPU cycles. In that case the memory of the target allocation is not touched at all, which also means the OS sometimes never has to provide a physical page backing the allocation's representation, reducing memory pressure on the system.
This commit is contained in:
parent bd93b7718e
commit 250a636217
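
As a minimal standalone illustration of the idea — using hypothetical simplified types (`Alloc`, a flat `Vec<u8>` plus a per-byte `defined` mask), not the interpreter's real `Allocation` API:

    /// Hypothetical simplified model of an interpreter allocation:
    /// raw bytes plus a per-byte "defined" mask.
    struct Alloc {
        bytes: Vec<u8>,
        defined: Vec<bool>,
    }

    fn copy_alloc(src: &Alloc, dest: &mut Alloc) {
        assert_eq!(src.bytes.len(), dest.bytes.len());
        assert_eq!(src.defined.len(), dest.defined.len());
        // Fast path: if no source byte is defined, the byte values cannot
        // matter to execution, so only the mask is updated and `dest.bytes`
        // is never written to.
        if src.defined.iter().all(|&d| !d) {
            dest.defined.iter_mut().for_each(|d| *d = false);
            return;
        }
        // Slow path: copy both the representation and the mask.
        dest.bytes.copy_from_slice(&src.bytes);
        dest.defined.copy_from_slice(&src.defined);
    }

Skipping the write in the fast path is what lets a never-touched destination page stay unbacked on operating systems that allocate physical pages lazily.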
@@ -594,6 +594,14 @@ pub struct AllocationDefinedness {
     ranges: smallvec::SmallVec<[u64; 1]>,
 }
 
+impl AllocationDefinedness {
+    pub fn all_bytes_undef(&self) -> bool {
+        // The `ranges` are run-length encoded and of alternating definedness.
+        // So if `ranges.len() > 1` then the second block is a range of defined.
+        self.initial == false && self.ranges.len() == 1
+    }
+}
+
 /// Transferring the definedness mask to other allocations.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     /// Creates a run-length encoding of the undef mask.
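
As a worked example of the encoding `all_bytes_undef` relies on (a hypothetical standalone sketch, assuming `initial` is the definedness of the first run and `ranges` holds the lengths of runs of alternating definedness): a five-byte mask `U U D D U` compresses to `initial = false`, `ranges = [2, 2, 1]`, while an all-undef mask compresses to a single run.

    struct Rle {
        initial: bool,    // definedness of the first run
        ranges: Vec<u64>, // run lengths, alternating definedness
    }

    impl Rle {
        fn all_bytes_undef(&self) -> bool {
            // Exactly one run, and that run starts (hence stays) undefined.
            !self.initial && self.ranges.len() == 1
        }
    }

    fn main() {
        let all_undef = Rle { initial: false, ranges: vec![5] };   // U U U U U
        let mixed = Rle { initial: false, ranges: vec![2, 2, 1] }; // U U D D U
        assert!(all_undef.all_bytes_undef());
        assert!(!mixed.all_bytes_undef());
    }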
@@ -841,6 +841,9 @@ pub fn copy_repeatedly(
         let tcx = self.tcx.tcx;
 
+        // The bits have to be saved locally before writing to dest in case src and dest overlap.
+        assert_eq!(size.bytes() as usize as u64, size.bytes());
+
         // This checks relocation edges on the src.
         let src_bytes =
             self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
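
The `as usize as u64` round trip in the added assert is a portable overflow check: on a host where `usize` is narrower than `u64`, the first cast truncates and the equality fails. A hypothetical standalone version:

    // Sketch: holds exactly when `x` fits in the host's `usize`.
    fn fits_in_usize(x: u64) -> bool {
        x as usize as u64 == x
    }

    // On a 32-bit host, fits_in_usize(1 << 40) is false because the cast
    // to usize truncates the value; on a 64-bit host it is true.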
@@ -855,6 +858,22 @@ pub fn copy_repeatedly(
         let dest_bytes = dest_bytes.as_mut_ptr();
 
+        // Prepare a copy of the undef mask.
+        let compressed = self.get_raw(src.alloc_id)?.compress_undef_range(src, size);
+
+        if compressed.all_bytes_undef() {
+            // Fast path: If all bytes are `undef` then there is nothing to copy. The target range
+            // is marked as undef but we otherwise omit changing the byte representation which may
+            // be arbitrary for undef bytes.
+            // This also avoids writing to the target bytes so that the backing allocation is never
+            // touched if the bytes stay undef for the whole interpreter execution. On contemporary
+            // operating systems this can avoid physically allocating the page.
+            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
+            dest_alloc.mark_definedness(dest, size * length, false);
+            dest_alloc.mark_relocation_range(relocations);
+            return Ok(());
+        }
+
         // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
         // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
         // `dest` could possibly overlap.
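
`copy_repeatedly` writes `length` back-to-back copies of the `size`-byte source range, which is why the fast path can mark the whole `size * length` destination region undef in a single call. A hypothetical simplified sketch of that marking step (the real `mark_definedness` operates on a compact bitmask, not a `Vec<bool>`):

    // Mark `length` consecutive copies of a `size`-byte range as undefined.
    // Only the mask is written; the destination's byte buffer stays untouched.
    fn mark_repeated_undef(defined: &mut [bool], dest_offset: usize, size: usize, length: usize) {
        for d in &mut defined[dest_offset..dest_offset + size * length] {
            *d = false;
        }
    }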
@@ -889,8 +908,14 @@ pub fn copy_repeatedly(
             }
         }
 
-        // copy definedness to the destination
-        self.copy_undef_mask(src, dest, size, length)?;
+        // now fill in all the data
+        self.get_raw_mut(dest.alloc_id)?.mark_compressed_undef_range(
+            &compressed,
+            dest,
+            size,
+            length,
+        );
 
         // copy the relocations to the destination
         self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);
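
In the slow path, the compressed mask is expanded back into the destination's per-byte mask once per repetition. Roughly, as a hypothetical simplified sketch (the real `mark_compressed_undef_range` writes the bitmask directly rather than a `Vec<bool>`):

    // Expand an RLE mask (`initial` definedness, alternating run lengths)
    // into `length` consecutive copies starting at `dest_offset`.
    fn expand_repeated(
        initial: bool,
        ranges: &[u64],
        defined: &mut [bool],
        dest_offset: usize,
        size: usize,
        length: usize,
    ) {
        for rep in 0..length {
            let mut pos = dest_offset + rep * size;
            let mut state = initial;
            for &run in ranges {
                for d in &mut defined[pos..pos + run as usize] {
                    *d = state;
                }
                pos += run as usize;
                state = !state; // runs alternate between undef and defined
            }
        }
    }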
@@ -898,29 +923,8 @@ pub fn copy_repeatedly(
     }
 }
 
-/// Undefined bytes
+/// Machine pointer introspection.
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
-    // FIXME: Add a fast version for the common, nonoverlapping case
-    fn copy_undef_mask(
-        &mut self,
-        src: Pointer<M::PointerTag>,
-        dest: Pointer<M::PointerTag>,
-        size: Size,
-        repeat: u64,
-    ) -> InterpResult<'tcx> {
-        // The bits have to be saved locally before writing to dest in case src and dest overlap.
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
-
-        let src_alloc = self.get_raw(src.alloc_id)?;
-        let compressed = src_alloc.compress_undef_range(src, size);
-
-        // now fill in all the data
-        let dest_allocation = self.get_raw_mut(dest.alloc_id)?;
-        dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat);
-
-        Ok(())
-    }
-
     pub fn force_ptr(
         &self,
         scalar: Scalar<M::PointerTag>,