separate bounds-check from alignment check
parent e24835c6e0
commit b131fc10ae
@@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
         // to avoid could be expensive: on the potentially larger types, arrays and slices,
         // rather than on all aggregates unconditionally.
         if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
-            let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+            let Some((size, _align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
                 // We do the walk if we can't determine the size of the mplace: we may be
                 // dealing with extern types here in the future.
                 return Ok(true);
@@ -267,7 +267,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
 
             // If there is no provenance in this allocation, it does not contain references
             // that point to another allocation, and we can avoid the interning walk.
-            if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size, align)? {
+            if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size)? {
                 if !alloc.has_provenance() {
                     return Ok(false);
                 }
@@ -13,7 +13,7 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
 use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::{Abi, Align, Primitive, Size};
+use rustc_target::abi::{Abi, Primitive, Size};
 
 use super::{
     util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@@ -349,10 +349,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Check that the range between them is dereferenceable ("in-bounds or one past the
         // end of the same allocation"). This is like the check in ptr_offset_inbounds.
         let min_ptr = if dist >= 0 { b } else { a };
-        self.check_ptr_access_align(
+        self.check_ptr_access(
             min_ptr,
             Size::from_bytes(dist.unsigned_abs()),
-            Align::ONE,
             CheckInAllocMsg::OffsetFromTest,
         )?;
 
@@ -581,10 +580,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // pointers to be properly aligned (unlike a read/write operation).
         let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
         // This call handles checking for integer/null pointers.
-        self.check_ptr_access_align(
+        self.check_ptr_access(
             min_ptr,
             Size::from_bytes(offset_bytes.unsigned_abs()),
-            Align::ONE,
             CheckInAllocMsg::PointerArithmeticTest,
         )?;
         Ok(offset_ptr)
@@ -613,7 +611,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let src = self.read_pointer(src)?;
         let dst = self.read_pointer(dst)?;
 
-        self.mem_copy(src, align, dst, align, size, nonoverlapping)
+        self.check_ptr_align(src, align)?;
+        self.check_ptr_align(dst, align)?;
+
+        self.mem_copy(src, dst, size, nonoverlapping)
     }
 
     pub(crate) fn write_bytes_intrinsic(
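
Note on the hunk above: the copy intrinsic used to pass `align` into `mem_copy` for both pointers; it now checks alignment explicitly on `src` and `dst` with the new `check_ptr_align`, and `mem_copy` is bounds-checked only. Below is a minimal stand-alone sketch of that call shape, using plain slices and integer addresses instead of the interpreter's `Pointer` and `Align` types (all names in the sketch are illustrative, not rustc API):

// Alignment check, done once per pointer by the caller.
fn check_align(addr: usize, align: usize) -> Result<(), String> {
    assert!(align.is_power_of_two());
    if addr % align == 0 { Ok(()) } else { Err(format!("{addr:#x} is not {align}-aligned")) }
}

// The copy itself only performs bounds checks.
fn bounds_checked_copy(src: &[u8], dst: &mut [u8], len: usize) -> Result<(), String> {
    if len > src.len() || len > dst.len() {
        return Err("copy out of bounds".into());
    }
    dst[..len].copy_from_slice(&src[..len]);
    Ok(())
}

fn copy_model(
    src_addr: usize, src: &[u8],
    dst_addr: usize, dst: &mut [u8],
    len: usize, align: usize,
) -> Result<(), String> {
    // Mirrors the new call shape: check_ptr_align(src), check_ptr_align(dst), then mem_copy.
    check_align(src_addr, align)?;
    check_align(dst_addr, align)?;
    bounds_checked_copy(src, dst, len)
}

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    copy_model(0x1000, &src, 0x2000, &mut dst, 4, 4).unwrap();
    assert_eq!(dst, src);
}

Keeping the alignment check at the call site is what lets helpers like `mem_copy` drop their `Align` parameters in the later hunks.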
@@ -669,7 +670,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                          size|
          -> InterpResult<'tcx, &[u8]> {
             let ptr = this.read_pointer(op)?;
-            let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+            let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
                 // zero-sized access
                 return Ok(&[]);
             };
@@ -258,14 +258,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             None => self.get_alloc_raw(alloc_id)?.size(),
         };
         // This will also call the access hooks.
-        self.mem_copy(
-            ptr,
-            Align::ONE,
-            new_ptr.into(),
-            Align::ONE,
-            old_size.min(new_size),
-            /*nonoverlapping*/ true,
-        )?;
+        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
         self.deallocate_ptr(ptr, old_size_and_align, kind)?;
 
         Ok(new_ptr)
@@ -367,12 +360,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Align,
     ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
         self.check_and_deref_ptr(
             ptr,
             size,
-            align,
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
                 let (size, align) = self
@@ -382,17 +373,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         )
     }
 
-    /// Check if the given pointer points to live memory of given `size` and `align`.
+    /// Check if the given pointer points to live memory of the given `size`.
     /// The caller can control the error message for the out-of-bounds case.
     #[inline(always)]
-    pub fn check_ptr_access_align(
+    pub fn check_ptr_access(
         &self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Align,
         msg: CheckInAllocMsg,
     ) -> InterpResult<'tcx> {
-        self.check_and_deref_ptr(ptr, size, align, msg, |alloc_id, _, _| {
+        self.check_and_deref_ptr(ptr, size, msg, |alloc_id, _, _| {
             let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
             Ok((size, align, ()))
         })?;
@@ -408,7 +398,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Align,
         msg: CheckInAllocMsg,
         alloc_size: impl FnOnce(
             AllocId,
@@ -423,17 +412,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 if size.bytes() > 0 || addr == 0 {
                     throw_ub!(DanglingIntPointer(addr, msg));
                 }
-                // Must be aligned.
-                if M::enforce_alignment(self) && align.bytes() > 1 {
-                    self.check_misalign(
-                        Self::offset_misalignment(addr, align),
-                        CheckAlignMsg::AccessedPtr,
-                    )?;
-                }
                 None
             }
             Ok((alloc_id, offset, prov)) => {
-                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
+                let (alloc_size, _alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
                 // Test bounds. This also ensures non-null.
                 // It is sufficient to check this for the end pointer. Also check for overflow!
                 if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
@@ -449,14 +431,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 if M::Provenance::OFFSET_IS_ADDR {
                     assert_ne!(ptr.addr(), Size::ZERO);
                 }
-                // Test align. Check this last; if both bounds and alignment are violated
-                // we want the error to be about the bounds.
-                if M::enforce_alignment(self) && align.bytes() > 1 {
-                    self.check_misalign(
-                        self.alloc_misalignment(ptr, offset, align, alloc_align),
-                        CheckAlignMsg::AccessedPtr,
-                    )?;
-                }
 
                 // We can still be zero-sized in this branch, in which case we have to
                 // return `None`.
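
The two blocks removed above are the alignment checks that used to run inside `check_and_deref_ptr`; after this change the helper performs only the bounds and dangling-pointer checks, and callers that care about alignment check it separately and afterwards, so an access that is both out-of-bounds and misaligned still reports the bounds error. A minimal stand-alone sketch of that ordering, with bare integers standing in for the interpreter's `Pointer`, `Size`, and `Align` types (illustrative names only):

#[derive(Debug)]
enum AccessError {
    OutOfBounds { offset: u64, size: u64, alloc_size: u64 },
    Misaligned { has: u64, required: u64 },
}

// Bounds check only: knows nothing about alignment.
fn check_bounds(offset: u64, size: u64, alloc_size: u64) -> Result<(), AccessError> {
    match offset.checked_add(size) {
        // Checking the end offset also catches overflow.
        Some(end) if end <= alloc_size => Ok(()),
        _ => Err(AccessError::OutOfBounds { offset, size, alloc_size }),
    }
}

// Alignment check only: a separate, later step.
fn check_align(addr: u64, required: u64) -> Result<(), AccessError> {
    assert!(required.is_power_of_two());
    if addr % required == 0 {
        Ok(())
    } else {
        Err(AccessError::Misaligned { has: 1 << addr.trailing_zeros(), required })
    }
}

fn check_access(addr: u64, offset: u64, size: u64, alloc_size: u64, align: u64) -> Result<(), AccessError> {
    // Bounds first, alignment second: a pointer that violates both
    // still reports the bounds error.
    check_bounds(offset, size, alloc_size)?;
    check_align(addr, align)
}

fn main() {
    // In bounds but misaligned: alignment error.
    assert!(matches!(check_access(0x1003, 3, 4, 16, 4), Err(AccessError::Misaligned { .. })));
    // Out of bounds *and* misaligned: the bounds error wins.
    assert!(matches!(check_access(0x1003, 13, 8, 16, 4), Err(AccessError::OutOfBounds { .. })));
}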
@@ -465,7 +439,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         })
     }
 
-    #[inline(always)]
     pub(super) fn check_misalign(
         &self,
         misaligned: Option<Misalignment>,
@@ -477,53 +450,54 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(())
     }
 
-    #[must_use]
-    fn offset_misalignment(offset: u64, align: Align) -> Option<Misalignment> {
-        if offset % align.bytes() == 0 {
-            None
-        } else {
-            // The biggest power of two through which `offset` is divisible.
-            let offset_pow2 = 1 << offset.trailing_zeros();
-            Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
-        }
-    }
-
-    #[must_use]
-    fn alloc_misalignment(
-        &self,
-        ptr: Pointer<Option<M::Provenance>>,
-        offset: Size,
-        align: Align,
-        alloc_align: Align,
-    ) -> Option<Misalignment> {
-        if M::use_addr_for_alignment_check(self) {
-            // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
-            Self::offset_misalignment(ptr.addr().bytes(), align)
-        } else {
-            // Check allocation alignment and offset alignment.
-            if alloc_align.bytes() < align.bytes() {
-                Some(Misalignment { has: alloc_align, required: align })
-            } else {
-                Self::offset_misalignment(offset.bytes(), align)
-            }
-        }
-    }
-
     pub(super) fn is_ptr_misaligned(
         &self,
         ptr: Pointer<Option<M::Provenance>>,
         align: Align,
     ) -> Option<Misalignment> {
-        if !M::enforce_alignment(self) {
+        if !M::enforce_alignment(self) || align.bytes() == 1 {
             return None;
         }
-        match self.ptr_try_get_alloc_id(ptr) {
-            Err(addr) => Self::offset_misalignment(addr, align),
-            Ok((alloc_id, offset, _prov)) => {
-                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
-                self.alloc_misalignment(ptr, offset, align, alloc_align)
+
+        #[inline]
+        fn offset_misalignment(offset: u64, align: Align) -> Option<Misalignment> {
+            if offset % align.bytes() == 0 {
+                None
+            } else {
+                // The biggest power of two through which `offset` is divisible.
+                let offset_pow2 = 1 << offset.trailing_zeros();
+                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
+            }
+        }
+
+        match self.ptr_try_get_alloc_id(ptr) {
+            Err(addr) => offset_misalignment(addr, align),
+            Ok((alloc_id, offset, _prov)) => {
+                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
+                if M::use_addr_for_alignment_check(self) {
+                    // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+                    offset_misalignment(ptr.addr().bytes(), align)
+                } else {
+                    // Check allocation alignment and offset alignment.
+                    if alloc_align.bytes() < align.bytes() {
+                        Some(Misalignment { has: alloc_align, required: align })
+                    } else {
+                        offset_misalignment(offset.bytes(), align)
+                    }
+                }
             }
         }
     }
 
+    /// Checks a pointer for misalignment.
+    ///
+    /// The error assumes this is checking the pointer used directly for an access.
+    pub fn check_ptr_align(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        align: Align,
+    ) -> InterpResult<'tcx> {
+        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
+    }
 }
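
For reference, the nested `offset_misalignment` helper above is a few lines of arithmetic: the reported `has` alignment is the largest power of two dividing the offset. A stand-alone sketch with bare `u64` values in place of rustc's `Align` and `Misalignment` types (simplified, illustrative shapes):

// Stand-alone model of the nested helper: `has` is the largest power of two
// that divides the offset, i.e. the alignment the pointer actually provides.
#[derive(Debug, PartialEq)]
struct Misalignment {
    has: u64,
    required: u64,
}

fn offset_misalignment(offset: u64, required: u64) -> Option<Misalignment> {
    assert!(required.is_power_of_two());
    if offset % required == 0 {
        None
    } else {
        // The biggest power of two through which `offset` is divisible.
        // (`offset` is nonzero here, so `trailing_zeros` is < 64.)
        let has = 1u64 << offset.trailing_zeros();
        Some(Misalignment { has, required })
    }
}

fn main() {
    assert_eq!(offset_misalignment(16, 8), None);
    assert_eq!(offset_misalignment(12, 8), Some(Misalignment { has: 4, required: 8 }));
}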
@@ -629,18 +603,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
-    /// "Safe" (bounds and align-checked) allocation access.
+    /// Bounds-checked *but not align-checked* allocation access.
     pub fn get_ptr_alloc<'a>(
         &'a self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Align,
     ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
     {
         let ptr_and_alloc = self.check_and_deref_ptr(
             ptr,
             size,
-            align,
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
                 let alloc = self.get_alloc_raw(alloc_id)?;
@@ -701,15 +673,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok((alloc, &mut self.machine))
     }
 
-    /// "Safe" (bounds and align-checked) allocation access.
+    /// Bounds-checked *but not align-checked* allocation access.
     pub fn get_ptr_alloc_mut<'a>(
         &'a mut self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Align,
     ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
     {
-        let parts = self.get_ptr_access(ptr, size, align)?;
+        let parts = self.get_ptr_access(ptr, size)?;
         if let Some((alloc_id, offset, prov)) = parts {
             let tcx = *self.tcx;
             // FIXME: can we somehow avoid looking up the allocation twice here?
@@ -1066,7 +1037,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
     ) -> InterpResult<'tcx, &[u8]> {
-        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
            // zero-sized access
            return Ok(&[]);
        };
@@ -1092,7 +1063,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        assert_eq!(lower, len, "can only write iterators with a precise length");
 
        let size = Size::from_bytes(len);
-        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
+        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
            // zero-sized access
            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
            return Ok(());
@@ -1117,29 +1088,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn mem_copy(
         &mut self,
         src: Pointer<Option<M::Provenance>>,
-        src_align: Align,
         dest: Pointer<Option<M::Provenance>>,
-        dest_align: Align,
         size: Size,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
     }
 
     pub fn mem_copy_repeatedly(
         &mut self,
         src: Pointer<Option<M::Provenance>>,
-        src_align: Align,
         dest: Pointer<Option<M::Provenance>>,
-        dest_align: Align,
         size: Size,
         num_copies: u64,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
         let tcx = self.tcx;
         // We need to do our own bounds-checks.
-        let src_parts = self.get_ptr_access(src, size, src_align)?;
-        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+        let src_parts = self.get_ptr_access(src, size)?;
+        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
 
         // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
         // and once below to get the underlying `&[mut] Allocation`.
@@ -460,7 +460,7 @@ where
             .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
         // We check alignment separately, and *after* checking everything else.
         // If an access is both OOB and misaligned, we want to see the bounds error.
-        let a = self.get_ptr_alloc(mplace.ptr(), size, Align::ONE)?;
+        let a = self.get_ptr_alloc(mplace.ptr(), size)?;
         self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
         Ok(a)
     }
@@ -478,7 +478,7 @@ where
         // If an access is both OOB and misaligned, we want to see the bounds error.
         // However we have to call `check_misalign` first to make the borrow checker happy.
         let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
-        let a = self.get_ptr_alloc_mut(mplace.ptr(), size, Align::ONE)?;
+        let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
         misalign_err?;
         Ok(a)
     }
@@ -873,14 +873,7 @@ where
         // non-overlapping.)
         // We check alignment separately, and *after* checking everything else.
         // If an access is both OOB and misaligned, we want to see the bounds error.
-        self.mem_copy(
-            src.ptr(),
-            Align::ONE,
-            dest.ptr(),
-            Align::ONE,
-            dest_size,
-            /*nonoverlapping*/ true,
-        )?;
+        self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
         self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
         self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
         Ok(())
@@ -7,7 +7,6 @@ use either::Either;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::LayoutOf;
-use rustc_target::abi::Align;
 
 use super::{ImmTy, InterpCx, Machine, Projectable};
 use crate::util;
@@ -210,9 +209,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // No alignment requirement since `copy_op` above already checked it.
                 self.mem_copy_repeatedly(
                     first_ptr,
-                    Align::ONE,
                     rest_ptr,
-                    Align::ONE,
                     elem_size,
                     length - 1,
                     /*nonoverlapping:*/ true,
@@ -20,7 +20,7 @@ use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_span::symbol::{sym, Symbol};
 use rustc_target::abi::{
-    Abi, Align, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
+    Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
 };
 
 use std::hash::Hash;
@@ -378,18 +378,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
         // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
         try_validation!(
-            self.ecx.check_ptr_access_align(
+            self.ecx.check_ptr_access(
                 place.ptr(),
                 size,
-                align,
                 CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
             ),
             self.path,
-            Ub(AlignmentCheckFailed(Misalignment { required, has }, _msg)) => UnalignedPtr {
-                ptr_kind,
-                required_bytes: required.bytes(),
-                found_bytes: has.bytes()
-            },
             Ub(DanglingIntPointer(0, _)) => NullPtr { ptr_kind },
             Ub(DanglingIntPointer(i, _)) => DanglingPtrNoProvenance {
                 ptr_kind,
@@ -405,6 +399,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 ptr_kind,
             },
         );
+        try_validation!(
+            self.ecx.check_ptr_align(
+                place.ptr(),
+                align,
+            ),
+            self.path,
+            Ub(AlignmentCheckFailed(Misalignment { required, has }, _msg)) => UnalignedPtr {
+                ptr_kind,
+                required_bytes: required.bytes(),
+                found_bytes: has.bytes()
+            },
+        );
         // Do not allow pointers to uninhabited types.
         if place.layout.abi.is_uninhabited() {
             let ty = place.layout.ty;
@@ -782,7 +788,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 // NOTE: Keep this in sync with the handling of integer and float
                 // types above, in `visit_primitive`.
                 // No need for an alignment check here, this is not an actual memory access.
-                let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size, Align::ONE)?.expect("we already excluded size 0");
+                let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size)?.expect("we already excluded size 0");
 
                 match alloc.get_bytes_strip_provenance() {
                     // In the happy case, we needn't check anything else.
@@ -14,7 +14,7 @@ use log::trace;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir::{Mutability, RetagKind};
 use rustc_middle::ty::{self, layout::HasParamEnv, Ty};
-use rustc_target::abi::{Abi, Align, Size};
+use rustc_target::abi::{Abi, Size};
 
 use crate::borrow_tracker::{
     stacked_borrows::diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder},
@@ -616,7 +616,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
     ) -> InterpResult<'tcx, Option<Provenance>> {
         let this = self.eval_context_mut();
         // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
-        this.check_ptr_access_align(place.ptr(), size, Align::ONE, CheckInAllocMsg::InboundsTest)?;
+        this.check_ptr_access(place.ptr(), size, CheckInAllocMsg::InboundsTest)?;
 
         // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'mir, 'tcx>,
@@ -1,6 +1,6 @@
 use log::trace;
 
-use rustc_target::abi::{Abi, Align, Size};
+use rustc_target::abi::{Abi, Size};
 
 use crate::borrow_tracker::{
     AccessKind, GlobalState, GlobalStateInner, ProtectorKind, RetagFields,
@@ -206,10 +206,9 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
         // Make sure the new permission makes sense as the initial permission of a fresh tag.
         assert!(new_perm.initial_state.is_initial());
         // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
-        this.check_ptr_access_align(
+        this.check_ptr_access(
             place.ptr(),
             ptr_size,
-            Align::ONE,
             CheckInAllocMsg::InboundsTest,
         )?;
 
@@ -1017,11 +1017,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
         // be 8-aligned).
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
+        this.check_ptr_align(
             place.ptr(),
-            place.layout.size,
             align,
-            CheckInAllocMsg::MemoryAccessTest,
         )?;
         // Ensure the allocation is mutable. Even failing (read-only) compare_exchange need mutable
         // memory on many targets (i.e., they segfault if taht memory is mapped read-only), and
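
The hunk just above is Miri's atomic-access check after the split: the size and in-allocation message are dropped (presumably because the subsequent memory access is itself bounds-checked), and only the size-derived alignment requirement remains, since an N-byte atomic must be N-aligned even if the wrapped integer type is less aligned. A tiny stand-alone sketch of that rule with plain integers (illustrative names, not Miri API):

// An N-byte atomic must be N-aligned, even on targets where the underlying
// integer type would only require a smaller alignment (e.g. u64 on 32-bit).
fn atomic_ptr_sufficiently_aligned(addr: u64, atomic_size: u64) -> bool {
    assert!(atomic_size.is_power_of_two(), "atomic types have power-of-two sizes");
    addr % atomic_size == 0
}

fn main() {
    assert!(atomic_ptr_sufficiently_aligned(0x1000, 8));
    // 4-aligned but not 8-aligned: rejected for an 8-byte atomic.
    assert!(!atomic_ptr_sufficiently_aligned(0x1004, 8));
}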
@@ -785,7 +785,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         loop {
             // FIXME: We are re-getting the allocation each time around the loop.
             // Would be nice if we could somehow "extend" an existing AllocRange.
-            let alloc = this.get_ptr_alloc(ptr.offset(len, this)?, size1, Align::ONE)?.unwrap(); // not a ZST, so we will get a result
+            let alloc = this.get_ptr_alloc(ptr.offset(len, this)?, size1)?.unwrap(); // not a ZST, so we will get a result
             let byte = alloc.read_integer(alloc_range(Size::ZERO, size1))?.to_u8()?;
             if byte == 0 {
                 break;
@@ -825,13 +825,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     fn read_wide_str(&self, mut ptr: Pointer<Option<Provenance>>) -> InterpResult<'tcx, Vec<u16>> {
         let this = self.eval_context_ref();
         let size2 = Size::from_bytes(2);
-        let align2 = Align::from_bytes(2).unwrap();
+        this.check_ptr_align(ptr, Align::from_bytes(2).unwrap())?;
 
         let mut wchars = Vec::new();
         loop {
             // FIXME: We are re-getting the allocation each time around the loop.
             // Would be nice if we could somehow "extend" an existing AllocRange.
-            let alloc = this.get_ptr_alloc(ptr, size2, align2)?.unwrap(); // not a ZST, so we will get a result
+            let alloc = this.get_ptr_alloc(ptr, size2)?.unwrap(); // not a ZST, so we will get a result
             let wchar = alloc.read_integer(alloc_range(Size::ZERO, size2))?.to_u16()?;
             if wchar == 0 {
                 break;
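
In `read_wide_str` above, the 2-byte alignment requirement is now checked once on the base pointer before the loop, while each iteration keeps its bounds-checked 2-byte read; since the pointer only ever advances in 2-byte steps, checking alignment once is enough. A stand-alone toy model of that structure over a byte buffer (illustrative names, not Miri API):

fn read_wide_str_model(buf: &[u8], base_addr: usize) -> Result<Vec<u16>, String> {
    // Alignment is a property of the starting address; checking it once up
    // front suffices because every step advances by exactly 2 bytes.
    if base_addr % 2 != 0 {
        return Err(format!("{base_addr:#x} is not 2-aligned"));
    }
    let mut wchars = Vec::new();
    let mut offset = 0;
    loop {
        // Bounds-checked 2-byte read, repeated each iteration.
        let bytes = buf.get(offset..offset + 2).ok_or("read out of bounds")?;
        let wchar = u16::from_le_bytes([bytes[0], bytes[1]]);
        if wchar == 0 {
            break;
        }
        wchars.push(wchar);
        offset += 2;
    }
    Ok(wchars)
}

fn main() {
    let buf = [b'H', 0, b'i', 0, 0, 0];
    assert_eq!(read_wide_str_model(&buf, 0x1000).unwrap(), vec![b'H' as u16, b'i' as u16]);
}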
@@ -867,8 +867,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // Store the UTF-16 string.
         let size2 = Size::from_bytes(2);
         let this = self.eval_context_mut();
+        this.check_ptr_align(ptr, Align::from_bytes(2).unwrap())?;
         let mut alloc = this
-            .get_ptr_alloc_mut(ptr, size2 * string_length, Align::from_bytes(2).unwrap())?
+            .get_ptr_alloc_mut(ptr, size2 * string_length)?
             .unwrap(); // not a ZST, so we will get a result
         for (offset, wchar) in wide_str.iter().copied().chain(iter::once(0x0000)).enumerate() {
             let offset = u64::try_from(offset).unwrap();
@@ -807,9 +807,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         this.mem_copy(
             ptr_src,
-            Align::ONE,
             ptr_dest,
-            Align::ONE,
             Size::from_bytes(n),
             true,
         )?;
@@ -830,9 +828,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let n = this.read_c_str(ptr_src)?.len().checked_add(1).unwrap();
         this.mem_copy(
             ptr_src,
-            Align::ONE,
             ptr_dest,
-            Align::ONE,
             Size::from_bytes(n),
             true,
         )?;
@@ -13,7 +13,7 @@ use log::trace;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_middle::ty::TyCtxt;
-use rustc_target::abi::{Align, Size};
+use rustc_target::abi::Size;
 
 use crate::shims::os_str::bytes_to_os_str;
 use crate::*;
@@ -756,10 +756,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         trace!("Reading from FD {}, size {}", fd, count);
 
         // Check that the *entire* buffer is actually valid memory.
-        this.check_ptr_access_align(
+        this.check_ptr_access(
             buf,
             Size::from_bytes(count),
-            Align::ONE,
             CheckInAllocMsg::MemoryAccessTest,
         )?;
 
@@ -810,10 +809,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // Isolation check is done via `FileDescriptor` trait.
 
         // Check that the *entire* buffer is actually valid memory.
-        this.check_ptr_access_align(
+        this.check_ptr_access(
             buf,
             Size::from_bytes(count),
-            Align::ONE,
             CheckInAllocMsg::MemoryAccessTest,
         )?;
 
@@ -1,6 +1,5 @@
 use rustc_middle::mir;
 use rustc_span::Symbol;
-use rustc_target::abi::Align;
 use rustc_target::spec::abi::Abi;
 
 use super::horizontal_bin_op;
@@ -76,9 +75,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
 
         this.mem_copy(
             src_ptr,
-            Align::ONE,
             dest.ptr(),
-            Align::ONE,
             dest.layout.size,
             /*nonoverlapping*/ true,
         )?;