Auto merge of #62441 - RalfJung:place-ptr-normalization, r=oli-obk
Miri: Provide pointer forcing methods for MemPlace and Op

These are useful when one wants to do a lot of work with some place or operand without doing int-to-ptr casts all the time. In particular, this is needed to fix some test failures in Miri: we need to normalize before starting a visitor that walks a run-time value, so that we can later be sure (during the visitor walk) that we have a proper `Pointer`. Also see the Miri side at https://github.com/rust-lang/miri/pull/830. Cc @eddyb @oli-obk
Commit: d4e1565509
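To see why normalizing once pays off, here is a minimal self-contained model of the idea, with toy stand-ins for `Scalar`, `Pointer`, and `MemPlace` rather than rustc's actual types: an int-or-ptr value is forced to a pointer a single time, after which every later access can simply assume the `Ptr` variant.

    #[derive(Debug, Clone, Copy)]
    struct Pointer { alloc_id: u64, offset: u64 }

    #[derive(Debug, Clone, Copy)]
    enum Scalar {
        Raw(u64),     // plain integer bits, possibly an absolute address
        Ptr(Pointer), // pointer relative to some allocation
    }

    #[derive(Debug, Clone, Copy)]
    struct MemPlace { ptr: Scalar }

    // Toy "machine" that can resolve integer addresses (Miri does this with its
    // intptrcast machinery; here a single fake allocation suffices).
    struct Machine { base_addr: u64, alloc_id: u64 }

    impl Machine {
        fn force_ptr(&self, s: Scalar) -> Result<Pointer, String> {
            match s {
                Scalar::Ptr(p) => Ok(p),
                Scalar::Raw(a) if a >= self.base_addr =>
                    Ok(Pointer { alloc_id: self.alloc_id, offset: a - self.base_addr }),
                Scalar::Raw(a) => Err(format!("0x{:x} is not in any allocation", a)),
            }
        }

        // Analogue of the PR's `force_mplace_ptr`: normalize the place once...
        fn force_mplace_ptr(&self, mut place: MemPlace) -> Result<MemPlace, String> {
            place.ptr = Scalar::Ptr(self.force_ptr(place.ptr)?);
            Ok(place)
        }
    }

    fn main() {
        let m = Machine { base_addr: 0x1000, alloc_id: 7 };
        let place = m.force_mplace_ptr(MemPlace { ptr: Scalar::Raw(0x1008) }).unwrap();
        // ...so a long visitor walk over `place` can just assert `Ptr`:
        match place.ptr {
            Scalar::Ptr(p) => println!("normalized: {:?}", p),
            Scalar::Raw(_) => unreachable!("we just normalized"),
        }
    }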
@@ -262,19 +262,6 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }
 
-    /// Returns this pointer's offset from the allocation base, or from NULL (for
-    /// integer pointers).
-    #[inline]
-    pub fn get_ptr_offset(self, cx: &impl HasDataLayout) -> Size {
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, cx.pointer_size().bytes());
-                Size::from_bytes(data as u64)
-            }
-            Scalar::Ptr(ptr) => ptr.offset,
-        }
-    }
-
     #[inline]
     pub fn from_bool(b: bool) -> Self {
         Scalar::Raw { data: b as u128, size: 1 }
@@ -339,6 +326,10 @@ impl<'tcx, Tag> Scalar<Tag> {
         Scalar::Raw { data: f.to_bits(), size: 8 }
     }
 
+    /// This is very rarely the method you want! You should dispatch on the type
+    /// and use `force_bits`/`assert_bits`/`force_ptr`/`assert_ptr`.
+    /// This method only exists for the benefit of low-level memory operations
+    /// as well as the implementation of the `force_*` methods.
     #[inline]
     pub fn to_bits_or_ptr(
         self,
@@ -359,6 +350,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }
 
+    /// Do not call this method! Use either `assert_bits` or `force_bits`.
     #[inline]
     pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
         match self {
@@ -372,6 +364,12 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }
 
+    #[inline(always)]
+    pub fn assert_bits(self, target_size: Size) -> u128 {
+        self.to_bits(target_size).expect("Expected Raw bits but got a Pointer")
+    }
+
+    /// Do not call this method! Use either `assert_ptr` or `force_ptr`.
     #[inline]
     pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
         match self {
@@ -381,6 +379,12 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }
 
+    #[inline(always)]
+    pub fn assert_ptr(self) -> Pointer<Tag> {
+        self.to_ptr().expect("Expected a Pointer but got Raw bits")
+    }
+
+    /// Do not call this method! Dispatch based on the type instead.
     #[inline]
     pub fn is_bits(self) -> bool {
         match self {
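The naming split these hunks introduce is worth spelling out: `assert_*` accessors promise the value already has the right form and ICE otherwise, while `force_*` methods may legitimately do conversion work through the machine. A hedged, self-contained sketch of the two behaviors (toy types, not rustc's):

    #[derive(Debug, Clone, Copy)]
    enum Scalar { Raw(u128), Ptr(u64) } // Ptr payload abbreviated to an id

    impl Scalar {
        fn to_ptr(self) -> Result<u64, String> {
            match self {
                Scalar::Ptr(id) => Ok(id),
                Scalar::Raw(_) => Err("got raw bits".to_string()),
            }
        }

        // assert_*: infallible by contract; a failure is a compiler bug (ICE).
        fn assert_ptr(self) -> u64 {
            self.to_ptr().expect("Expected a Pointer but got Raw bits")
        }

        // force_*: fallible, may do real conversion work
        // (here: a fake int-to-ptr stand-in for Miri's intptrcast).
        fn force_ptr(self) -> Result<u64, String> {
            match self {
                Scalar::Ptr(id) => Ok(id),
                Scalar::Raw(addr) => Ok((addr as u64) >> 4),
            }
        }
    }

    fn main() {
        assert_eq!(Scalar::Ptr(7).assert_ptr(), 7);
        assert_eq!(Scalar::Raw(0x70).force_ptr().unwrap(), 7);
    }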
@@ -389,6 +393,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }
 
+    /// Do not call this method! Dispatch based on the type instead.
     #[inline]
     pub fn is_ptr(self) -> bool {
         match self {
@@ -536,11 +541,13 @@ impl<'tcx, Tag> ScalarMaybeUndef<Tag> {
         }
     }
 
+    /// Do not call this method! Use either `assert_ptr` or `force_ptr`.
     #[inline(always)]
     pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
         self.not_undef()?.to_ptr()
     }
 
+    /// Do not call this method! Use either `assert_bits` or `force_bits`.
     #[inline(always)]
     pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
         self.not_undef()?.to_bits(target_size)
@@ -109,7 +109,7 @@ fn op_to_const<'tcx>(
     // `Immediate` is when we are called from `const_field`, and that `Immediate`
     // comes from a constant so it can happen have `Undef`, because the indirect
     // memory that was read had undefined bytes.
-    let mplace = op.to_mem_place();
+    let mplace = op.assert_mem_place();
     let ptr = mplace.ptr.to_ptr().unwrap();
     let alloc = ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id);
     ConstValue::ByRef { offset: ptr.offset, align: mplace.align, alloc }
@@ -661,7 +661,7 @@ pub fn const_eval_raw_provider<'tcx>(
         |body| eval_body_using_ecx(&mut ecx, cid, body, key.param_env)
     ).and_then(|place| {
         Ok(RawConst {
-            alloc_id: place.to_ptr().expect("we allocated this ptr!").alloc_id,
+            alloc_id: place.ptr.assert_ptr().alloc_id,
            ty: place.layout.ty
        })
    }).map_err(|error| {
@@ -214,10 +214,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             None => Size::from_bytes(self.get(ptr.alloc_id)?.bytes.len() as u64),
         };
         self.copy(
-            ptr.into(),
-            Align::from_bytes(1).unwrap(), // old_align anyway gets checked below by `deallocate`
-            new_ptr.into(),
-            new_align,
+            ptr,
+            new_ptr,
             old_size.min(new_size),
             /*nonoverlapping*/ true,
         )?;
@@ -310,6 +308,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// `Pointer` they need. And even if you already have a `Pointer`, call this method
     /// to make sure it is sufficiently aligned and not dangling. Not doing that may
     /// cause ICEs.
+    ///
+    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
+    /// this method is still appropriate.
     pub fn check_ptr_access(
         &self,
         sptr: Scalar<M::PointerTag>,
@@ -751,39 +752,26 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         self.get(ptr.alloc_id)?.read_c_str(self, ptr)
     }
 
-    /// Performs appropriate bounds checks.
+    /// Expects the caller to have checked bounds and alignment.
     pub fn copy(
         &mut self,
-        src: Scalar<M::PointerTag>,
-        src_align: Align,
-        dest: Scalar<M::PointerTag>,
-        dest_align: Align,
+        src: Pointer<M::PointerTag>,
+        dest: Pointer<M::PointerTag>,
         size: Size,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
     }
 
-    /// Performs appropriate bounds checks.
+    /// Expects the caller to have checked bounds and alignment.
    pub fn copy_repeatedly(
        &mut self,
-        src: Scalar<M::PointerTag>,
-        src_align: Align,
-        dest: Scalar<M::PointerTag>,
-        dest_align: Align,
+        src: Pointer<M::PointerTag>,
+        dest: Pointer<M::PointerTag>,
         size: Size,
         length: u64,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        // We need to check *both* before early-aborting due to the size being 0.
-        let (src, dest) = match (self.check_ptr_access(src, size, src_align)?,
-                self.check_ptr_access(dest, size * length, dest_align)?)
-        {
-            (Some(src), Some(dest)) => (src, dest),
-            // One of the two sizes is 0.
-            _ => return Ok(()),
-        };
-
         // first copy the relocations to a temporary buffer, because
         // `get_bytes_mut` will clear the relocations, which is correct,
         // since we don't want to keep any relocations at the target.
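The signature change is the heart of this hunk: `copy`/`copy_repeatedly` used to accept possibly-unchecked `Scalar` addresses plus alignments and run `check_ptr_access` themselves; now the caller checks once and hands over bare `Pointer`s. A rough self-contained illustration of that calling convention, using a toy flat-buffer memory rather than the interpreter's:

    // Toy memory: one flat buffer. `check_access` validates a range once and
    // returns None for zero-sized accesses, mirroring `check_ptr_access`.
    struct Memory { bytes: Vec<u8> }

    impl Memory {
        fn check_access(&self, offset: usize, size: usize) -> Result<Option<usize>, String> {
            if size == 0 {
                return Ok(None); // nothing to do; the address may even be dangling
            }
            if offset.checked_add(size).map_or(true, |end| end > self.bytes.len()) {
                return Err("out-of-bounds access".to_string());
            }
            Ok(Some(offset))
        }

        // Like the new `copy`: expects the *caller* to have checked bounds.
        fn copy(&mut self, src: usize, dest: usize, size: usize) {
            self.bytes.copy_within(src..src + size, dest);
        }
    }

    fn main() -> Result<(), String> {
        let mut mem = Memory { bytes: (0..16).collect() };
        // Caller checks both ranges up front, then calls the unchecked primitive.
        if let (Some(src), Some(dest)) = (mem.check_access(0, 4)?, mem.check_access(8, 4)?) {
            mem.copy(src, dest, 4);
        }
        assert_eq!(&mem.bytes[8..12], &[0, 1, 2, 3]);
        Ok(())
    }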
@@ -123,23 +123,23 @@ pub enum Operand<Tag=(), Id=AllocId> {
 
 impl<Tag> Operand<Tag> {
     #[inline]
-    pub fn to_mem_place(self) -> MemPlace<Tag>
+    pub fn assert_mem_place(self) -> MemPlace<Tag>
         where Tag: ::std::fmt::Debug
     {
         match self {
             Operand::Indirect(mplace) => mplace,
-            _ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self),
+            _ => bug!("assert_mem_place: expected Operand::Indirect, got {:?}", self),
 
         }
     }
 
     #[inline]
-    pub fn to_immediate(self) -> Immediate<Tag>
+    pub fn assert_immediate(self) -> Immediate<Tag>
         where Tag: ::std::fmt::Debug
     {
         match self {
             Operand::Immediate(imm) => imm,
-            _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
+            _ => bug!("assert_immediate: expected Operand::Immediate, got {:?}", self),
 
         }
     }
@@ -214,6 +214,19 @@ pub(super) fn from_known_layout<'tcx>(
     }
 }
 
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
+    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
+    #[inline]
+    pub fn force_op_ptr(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        match op.try_as_mplace() {
+            Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
+            Err(imm) => Ok(imm.into()), // Nothing to cast/force
+        }
+    }
+
     /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
     /// Returns `None` if the layout does not permit loading this as a value.
     fn try_read_immediate_from_mplace(
@@ -224,9 +237,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // Don't touch unsized
             return Ok(None);
         }
-        let (ptr, ptr_align) = mplace.to_scalar_ptr_align();
 
-        let ptr = match self.memory.check_ptr_access(ptr, mplace.layout.size, ptr_align)? {
+        let ptr = match self.check_mplace_access(mplace, None)? {
             Some(ptr) => ptr,
             None => return Ok(Some(ImmTy { // zero-sized type
                 imm: Immediate::Scalar(Scalar::zst().into()),
@@ -396,7 +408,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         } else {
             // The rest should only occur as mplace, we do not use Immediates for types
             // allowing such operations. This matches place_projection forcing an allocation.
-            let mplace = base.to_mem_place();
+            let mplace = base.assert_mem_place();
             self.mplace_projection(mplace, proj_elem)?.into()
         }
     })
@@ -122,21 +122,6 @@ impl<Tag> MemPlace<Tag> {
         Self::from_scalar_ptr(ptr.into(), align)
     }
 
-    #[inline(always)]
-    pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
-        assert!(self.meta.is_none());
-        (self.ptr, self.align)
-    }
-
-    /// Extract the ptr part of the mplace
-    #[inline(always)]
-    pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
-        // At this point, we forget about the alignment information --
-        // the place has been turned into a reference, and no matter where it came from,
-        // it now must be aligned.
-        self.to_scalar_ptr_align().0.to_ptr()
-    }
-
     /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space.
     /// This is the inverse of `ref_to_mplace`.
     #[inline(always)]
@@ -230,6 +215,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
     }
 }
 
+// These are defined here because they produce a place.
 impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
     #[inline(always)]
     pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
@@ -240,12 +226,12 @@ impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
     }
 
     #[inline(always)]
-    pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
+    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
         self.try_as_mplace().unwrap()
     }
 }
 
-impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
+impl<Tag: ::std::fmt::Debug> Place<Tag> {
     /// Produces a Place that will error if attempted to be read from or written to
     #[inline(always)]
     pub fn null(cx: &impl HasDataLayout) -> Self {
@@ -263,29 +249,19 @@ impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
     }
 
     #[inline]
-    pub fn to_mem_place(self) -> MemPlace<Tag> {
+    pub fn assert_mem_place(self) -> MemPlace<Tag> {
         match self {
             Place::Ptr(mplace) => mplace,
-            _ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self),
+            _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self),
 
         }
     }
-
-    #[inline]
-    pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
-        self.to_mem_place().to_scalar_ptr_align()
-    }
-
-    #[inline]
-    pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
-        self.to_mem_place().to_ptr()
-    }
 }
 
 impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> {
     #[inline]
-    pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
-        MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout }
+    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
+        MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
     }
 }
@@ -301,8 +277,6 @@ where
 {
     /// Take a value, which represents a (thin or fat) reference, and make it a place.
     /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
-    /// This does NOT call the "deref" machine hook, so it does NOT count as a
-    /// deref as far as Stacked Borrows is concerned. Use `deref_operand` for that!
     pub fn ref_to_mplace(
         &self,
         val: ImmTy<'tcx, M::PointerTag>,
@@ -322,8 +296,8 @@ where
         Ok(MPlaceTy { mplace, layout })
     }
 
-    // Take an operand, representing a pointer, and dereference it to a place -- that
-    // will always be a MemPlace. Lives in `place.rs` because it creates a place.
+    /// Take an operand, representing a pointer, and dereference it to a place -- that
+    /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
     pub fn deref_operand(
         &self,
         src: OpTy<'tcx, M::PointerTag>,
@@ -333,6 +307,36 @@ where
         self.ref_to_mplace(val)
     }
 
+    /// Check if the given place is good for memory access with the given
+    /// size, falling back to the layout's size if `None` (in the latter case,
+    /// this must be a statically sized type).
+    ///
+    /// On success, returns `None` for zero-sized accesses (where nothing else is
+    /// left to do) and a `Pointer` to use for the actual access otherwise.
+    #[inline]
+    pub fn check_mplace_access(
+        &self,
+        place: MPlaceTy<'tcx, M::PointerTag>,
+        size: Option<Size>,
+    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+        let size = size.unwrap_or_else(|| {
+            assert!(!place.layout.is_unsized());
+            assert!(place.meta.is_none());
+            place.layout.size
+        });
+        self.memory.check_ptr_access(place.ptr, size, place.align)
+    }
+
+    /// Force `place.ptr` to a `Pointer`.
+    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
+    pub fn force_mplace_ptr(
+        &self,
+        mut place: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
+        Ok(place)
+    }
+
     /// Offset a pointer to project to a field. Unlike `place_field`, this is always
     /// possible without allocating, so it can take `&self`. Also return the field's layout.
     /// This supports both struct and array fields.
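The `size: Option<Size>` parameter deserves a note: `None` means "use the layout size", which is only meaningful for statically sized types without metadata, hence the two assertions. A small sketch of that fallback logic with plain integers (a hypothetical helper, not rustc's code):

    // `explicit` wins when given; otherwise the statically known layout size
    // is used, which is only valid if the type is sized and carries no metadata.
    fn access_size(explicit: Option<u64>, layout_size: u64,
                   is_unsized: bool, has_meta: bool) -> u64 {
        explicit.unwrap_or_else(|| {
            assert!(!is_unsized, "layout size of an unsized type is meaningless");
            assert!(!has_meta, "a place with metadata needs an explicit size");
            layout_size
        })
    }

    fn main() {
        assert_eq!(access_size(Some(12), 4, false, false), 12); // e.g. 3 elements * 4 bytes
        assert_eq!(access_size(None, 4, false, false), 4);      // e.g. a plain u32
    }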
@@ -741,14 +745,12 @@ where
         value: Immediate<M::PointerTag>,
         dest: MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        let (ptr, ptr_align) = dest.to_scalar_ptr_align();
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
         // wrong type.
         assert!(!dest.layout.is_unsized());
-
-        let ptr = match self.memory.check_ptr_access(ptr, dest.layout.size, ptr_align)? {
+        let ptr = match self.check_mplace_access(dest, None)? {
             Some(ptr) => ptr,
             None => return Ok(()), // zero-sized access
         };
@@ -850,14 +852,21 @@ where
             dest.layout.size
         });
         assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
 
+        let src = self.check_mplace_access(src, Some(size))?;
+        let dest = self.check_mplace_access(dest, Some(size))?;
+        let (src_ptr, dest_ptr) = match (src, dest) {
+            (Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
+            (None, None) => return Ok(()), // zero-sized copy
+            _ => bug!("The pointers should both be Some or both None"),
+        };
+
         self.memory.copy(
-            src.ptr, src.align,
-            dest.ptr, dest.align,
+            src_ptr,
+            dest_ptr,
             size,
             /*nonoverlapping*/ true,
-        )?;
-
-        Ok(())
+        )
     }
 
     /// Copies the data from an operand to a place. The layouts may disagree, but they must
@@ -209,17 +209,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let dest = self.force_allocation(dest)?;
                 let length = dest.len(self)?;
 
-                if length > 0 {
-                    // write the first
+                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
+                    // Write the first.
                     let first = self.mplace_field(dest, 0)?;
                     self.copy_op(op, first.into())?;
 
                     if length > 1 {
-                        // copy the rest
-                        let (dest, dest_align) = first.to_scalar_ptr_align();
-                        let rest = dest.ptr_offset(first.layout.size, self)?;
+                        let elem_size = first.layout.size;
+                        // Copy the rest. This is performance-sensitive code
+                        // for big static/const arrays!
+                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                         self.memory.copy_repeatedly(
-                            dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
+                            first_ptr, rest_ptr, elem_size, length - 1, /*nonoverlapping:*/true
                         )?;
                     }
                 }
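The rewritten `Repeat` lowering writes element 0 once and then replicates it with `copy_repeatedly`, instead of writing `length` elements one by one; that is what keeps big `[x; N]` arrays in statics/consts cheap. The same strategy on a plain byte buffer (a sketch, not the interpreter code):

    fn repeat_fill(buf: &mut [u8], elem: &[u8], length: usize) {
        let elem_size = elem.len();
        assert_eq!(buf.len(), elem_size * length);
        if length == 0 {
            return; // zero-length array: nothing to write (the ZST/None case above)
        }
        // Write the first element...
        buf[..elem_size].copy_from_slice(elem);
        // ...then copy the rest from the already-initialized prefix.
        // (`copy_repeatedly` does this in a single call; plain slices use a loop.)
        for i in 1..length {
            buf.copy_within(0..elem_size, i * elem_size);
        }
    }

    fn main() {
        let mut buf = vec![0u8; 12];
        repeat_fill(&mut buf, &[1, 2, 3], 4);
        assert_eq!(buf, [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]);
    }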
@@ -426,7 +426,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 None => {
                     // Unsized self.
-                    args[0].to_mem_place()
+                    args[0].assert_mem_place()
                 }
             };
             // Find and consult vtable
@@ -440,9 +440,16 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     }
                 }
             }
-            // Check if we have encountered this pointer+layout combination
-            // before. Proceed recursively even for ZST, no
-            // reason to skip them! E.g., `!` is a ZST and we want to validate it.
+            // Proceed recursively even for ZST, no reason to skip them!
+            // `!` is a ZST and we want to validate it.
+            // Normalize before handing `place` to tracking because that will
+            // check for duplicates.
+            let place = if size.bytes() > 0 {
+                self.ecx.force_mplace_ptr(place)
+                    .expect("we already bounds-checked")
+            } else {
+                place
+            };
             let path = &self.path;
             ref_tracking.track(place, || {
                 // We need to clone the path anyway, make sure it gets created
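Why normalize before `ref_tracking.track`: the same location can show up once as a raw integer address and once as an allocation-relative pointer, and if the tracking set stores them as-is they do not compare equal, so the walk would visit the same memory twice (or loop on cyclic data). A toy demonstration with a `HashSet` (not the interpreter's `RefTracking`):

    use std::collections::HashSet;

    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    enum Addr {
        Raw(u64),                           // absolute integer address
        Ptr { alloc_id: u64, offset: u64 }, // allocation-relative pointer
    }

    // Stand-in for `force_mplace_ptr`: map raw addresses into the allocation.
    fn normalize(a: Addr, base: u64, alloc_id: u64) -> Addr {
        match a {
            Addr::Raw(abs) => Addr::Ptr { alloc_id, offset: abs - base },
            ptr => ptr,
        }
    }

    fn main() {
        let (base, id) = (0x1000, 7);
        let as_raw = Addr::Raw(0x1008);
        let as_ptr = Addr::Ptr { alloc_id: id, offset: 8 };

        let mut unnormalized = HashSet::new();
        assert!(unnormalized.insert(as_raw)); // same location, two entries:
        assert!(unnormalized.insert(as_ptr)); // the duplicate check is defeated

        let mut normalized = HashSet::new();
        assert!(normalized.insert(normalize(as_raw, base, id)));
        assert!(!normalized.insert(normalize(as_ptr, base, id))); // deduplicated
    }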
@@ -548,7 +555,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
     ) -> InterpResult<'tcx> {
         match op.layout.ty.sty {
             ty::Str => {
-                let mplace = op.to_mem_place(); // strings are never immediate
+                let mplace = op.assert_mem_place(); // strings are never immediate
                 try_validation!(self.ecx.read_str(mplace),
                     "uninitialized or non-UTF-8 data in str", self.path);
             }
@@ -565,7 +572,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     return Ok(());
                 }
                 // non-ZST array cannot be immediate, slices are never immediate
-                let mplace = op.to_mem_place();
+                let mplace = op.assert_mem_place();
                 // This is the length of the array/slice.
                 let len = mplace.len(self.ecx)?;
                 // zero length slices have nothing to be checked
@@ -576,7 +583,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 let ty_size = self.ecx.layout_of(tys)?.size;
                 // This is the size in bytes of the whole array.
                 let size = ty_size * len;
 
+                // Size is not 0, get a pointer.
                 let ptr = self.ecx.force_ptr(mplace.ptr)?;
 
                 // NOTE: Keep this in sync with the handling of integer and float
@@ -633,7 +640,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// `ref_tracking_for_consts` can be `None` to avoid recursive checking below references.
     /// This also toggles between "run-time" (no recursion) and "compile-time" (with recursion)
     /// validation (e.g., pointer values are fine in integers at runtime) and various other const
-    /// specific validation checks
+    /// specific validation checks.
     pub fn validate_operand(
         &self,
         op: OpTy<'tcx, M::PointerTag>,
@@ -652,6 +659,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ecx: self,
         };
 
+        // Try to cast to ptr *once* instead of all the time.
+        let op = self.force_op_ptr(op).unwrap_or(op);
+
         // Run it
         visitor.visit_value(op)
     }
@@ -242,7 +242,7 @@ macro_rules! make_value_visitor {
             match v.layout().ty.sty {
                 ty::Dynamic(..) => {
                     // immediate trait objects are not a thing
-                    let dest = v.to_op(self.ecx())?.to_mem_place();
+                    let dest = v.to_op(self.ecx())?.assert_mem_place();
                     let inner = self.ecx().unpack_dyn_trait(dest)?.1;
                     trace!("walk_value: dyn object layout: {:#?}", inner.layout);
                     // recurse with the inner type
@@ -316,7 +316,7 @@ macro_rules! make_value_visitor {
                     MPlaceTy::dangling(v.layout(), self.ecx())
                 } else {
                     // non-ZST array/slice/str cannot be immediate
-                    v.to_op(self.ecx())?.to_mem_place()
+                    v.to_op(self.ecx())?.assert_mem_place()
                 };
                 // Now we can go over all the fields.
                 let iter = self.ecx().mplace_array_fields(mplace)?