interpret: make read functions generic over operand type
commit 77ff1b83cd
parent 00fb45dccd
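The diff below changes the interpreter's read-side functions (`read_immediate`, `read_scalar`, `read_pointer`, `read_target_usize`, `read_target_isize`, `read_discriminant`, `deref_operand`, and the source side of `copy_op`) to take `&impl Readable<'tcx, M::Provenance>` instead of the concrete `&OpTy<'tcx, M::Provenance>`. A new `Readable` trait is implemented for `OpTy`, `MPlaceTy`, and `ImmTy`, so callers can pass places and immediates directly instead of converting with `.into()` first, and the `From<&MPlaceTy>`/`From<&ImmTy>` conversions into `OpTy` are removed. As a rough, self-contained sketch of the shape of this change (the type names below are simplified stand-ins, not the actual rustc/Miri definitions):

    // Simplified stand-ins for OpTy / MPlaceTy / ImmTy; the real types live in
    // rustc_const_eval::interpret and carry layout and provenance information.
    #[derive(Clone)]
    struct MPlace { addr: usize }
    #[derive(Clone)]
    struct Imm { bits: u128 }
    #[derive(Clone)]
    enum Op { Indirect(MPlace), Immediate(Imm) }

    // Stand-in for Either<MPlaceTy, ImmTy>.
    enum MPlaceOrImm { Place(MPlace), Imm(Imm) }

    // Counterpart of the `Readable` trait added in the diff: anything readable
    // is reducible to either a memory place or an immediate.
    trait Readable {
        fn as_mplace_or_imm(&self) -> MPlaceOrImm;
    }

    impl Readable for Op {
        fn as_mplace_or_imm(&self) -> MPlaceOrImm {
            match self {
                Op::Indirect(p) => MPlaceOrImm::Place(p.clone()),
                Op::Immediate(i) => MPlaceOrImm::Imm(i.clone()),
            }
        }
    }
    impl Readable for MPlace {
        fn as_mplace_or_imm(&self) -> MPlaceOrImm { MPlaceOrImm::Place(self.clone()) }
    }
    impl Readable for Imm {
        fn as_mplace_or_imm(&self) -> MPlaceOrImm { MPlaceOrImm::Imm(self.clone()) }
    }

    // A read function generic over the operand type, in the spirit of
    // `read_immediate`, `read_scalar`, or `deref_operand` below.
    fn read_bits(op: &impl Readable) -> u128 {
        match op.as_mplace_or_imm() {
            MPlaceOrImm::Place(p) => p.addr as u128, // pretend to load from memory
            MPlaceOrImm::Imm(i) => i.bits,
        }
    }

    fn main() {
        let place = MPlace { addr: 0x40 };
        // Callers no longer need `read_bits(&place.into())`; places, immediates,
        // and operands are all accepted directly.
        assert_eq!(read_bits(&place), 0x40);
        assert_eq!(read_bits(&Imm { bits: 7 }), 7);
        assert_eq!(read_bits(&Op::Indirect(MPlace { addr: 8 })), 8);
        assert_eq!(read_bits(&Op::Immediate(Imm { bits: 3 })), 3);
    }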
@@ -216,7 +216,7 @@ fn hook_special_const_fn(

             let mut msg_place = self.deref_operand(&args[0])?;
             while msg_place.layout.ty.is_ref() {
-                msg_place = self.deref_operand(&msg_place.into())?;
+                msg_place = self.deref_operand(&msg_place)?;
             }

             let msg = Symbol::intern(self.read_str(&msg_place)?);
@@ -86,7 +86,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
             Ok(ty::ValTree::zst())
         }
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
-            let Ok(val) = ecx.read_immediate(&place.into()) else {
+            let Ok(val) = ecx.read_immediate(place) else {
                 return Err(ValTreeCreationError::Other);
             };
             let val = val.to_scalar();
@@ -102,7 +102,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
         ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),

         ty::Ref(_, _, _) => {
-            let Ok(derefd_place) = ecx.deref_operand(&place.into()) else {
+            let Ok(derefd_place) = ecx.deref_operand(place) else {
                 return Err(ValTreeCreationError::Other);
             };
             debug!(?derefd_place);
@@ -130,7 +130,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
                 bug!("uninhabited types should have errored and never gotten converted to valtree")
             }

-            let Ok(variant) = ecx.read_discriminant(&place.into()) else {
+            let Ok(variant) = ecx.read_discriminant(place) else {
                 return Err(ValTreeCreationError::Other);
             };
             branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
@@ -56,7 +56,7 @@ pub fn cast(
             }

             CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
-                let src = self.read_immediate(&src)?;
+                let src = self.read_immediate(src)?;
                 let res = self.ptr_to_ptr(&src, cast_ty)?;
                 self.write_immediate(res, dest)?;
             }
@@ -5,8 +5,7 @@
 use rustc_target::abi::{self, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};

-use super::place::Writeable;
-use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, Scalar};
+use super::{ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable};

 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Writes the discriminant of the given variant.
@@ -97,11 +96,12 @@ pub fn write_discriminant(
     #[instrument(skip(self), level = "trace")]
     pub fn read_discriminant(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, VariantIdx> {
-        trace!("read_discriminant_value {:#?}", op.layout);
+        let ty = op.layout().ty;
+        trace!("read_discriminant_value {:#?}", op.layout());
         // Get type and layout of the discriminant.
-        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
         trace!("discriminant type: {:?}", discr_layout.ty);

         // We use "discriminant" to refer to the value associated with a particular enum variant.
@@ -109,20 +109,19 @@ pub fn read_discriminant(
         // declared list of variants -- they can differ with explicitly assigned discriminants.
         // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
         // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
-        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
             Variants::Single { index } => {
                 // Do some extra checks on enums.
-                if op.layout.ty.is_enum() {
+                if ty.is_enum() {
                     // Hilariously, `Single` is used even for 0-variant enums.
                     // (See https://github.com/rust-lang/rust/issues/89765).
-                    if matches!(op.layout.ty.kind(), ty::Adt(def, ..) if def.variants().is_empty())
-                    {
+                    if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
                         throw_ub!(UninhabitedEnumVariantRead(index))
                     }
                     // For consisteny with `write_discriminant`, and to make sure that
                     // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
                     // for uninhabited variants.
-                    if op.layout.for_variant(self, index).abi.is_uninhabited() {
+                    if op.layout().for_variant(self, index).abi.is_uninhabited() {
                         throw_ub!(UninhabitedEnumVariantRead(index))
                     }
                 }
@@ -168,7 +167,7 @@ pub fn read_discriminant(
                     self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
                 let discr_bits = discr_val.assert_bits(discr_layout.size);
                 // Convert discriminant to variant index, and catch invalid discriminants.
-                let index = match *op.layout.ty.kind() {
+                let index = match *ty.kind() {
                     ty::Adt(adt, _) => {
                         adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                     }
@@ -222,12 +221,8 @@ pub fn read_discriminant(
                             .checked_add(variant_index_relative)
                             .expect("overflow computing absolute variant idx"),
                     );
-                    let variants = op
-                        .layout
-                        .ty
-                        .ty_adt_def()
-                        .expect("tagged layout for non adt")
-                        .variants();
+                    let variants =
+                        ty.ty_adt_def().expect("tagged layout for non adt").variants();
                     assert!(variant_index < variants.next_index());
                     variant_index
                 } else {
@@ -242,7 +237,7 @@ pub fn read_discriminant(
             }
         };
         // For consisteny with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
-        if op.layout.for_variant(self, index).abi.is_uninhabited() {
+        if op.layout().for_variant(self, index).abi.is_uninhabited() {
             throw_ub!(UninhabitedEnumVariantRead(index))
         }
         Ok(index)
@@ -170,7 +170,7 @@ fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
         let tcx = self.ecx.tcx;
         let ty = mplace.layout.ty;
         if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
-            let value = self.ecx.read_immediate(&mplace.into())?;
+            let value = self.ecx.read_immediate(mplace)?;
             let mplace = self.ecx.ref_to_mplace(&value)?;
             assert_eq!(mplace.layout.ty, referenced_ty);
             // Handle trait object vtables.
@@ -226,7 +226,7 @@ pub fn emulate_intrinsic(
             }
             sym::discriminant_value => {
                 let place = self.deref_operand(&args[0])?;
-                let variant = self.read_discriminant(&place.into())?;
+                let variant = self.read_discriminant(&place)?;
                 let discr = self.discriminant_for_variant(place.layout, variant)?;
                 self.write_scalar(discr, dest)?;
             }
@@ -445,7 +445,7 @@ pub fn emulate_intrinsic(
                     input_len
                 );
                 self.copy_op(
-                    &self.project_index(&input, index)?.into(),
+                    &self.project_index(&input, index)?,
                     dest,
                     /*allow_transmute*/ false,
                 )?;
@@ -610,7 +610,7 @@ pub(crate) fn copy_intrinsic(
         count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        let count = self.read_target_usize(&count)?;
+        let count = self.read_target_usize(count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
         // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
@@ -622,8 +622,8 @@ pub(crate) fn copy_intrinsic(
             )
         })?;

-        let src = self.read_pointer(&src)?;
-        let dst = self.read_pointer(&dst)?;
+        let src = self.read_pointer(src)?;
+        let dst = self.read_pointer(dst)?;

         self.mem_copy(src, align, dst, align, size, nonoverlapping)
     }
@@ -636,9 +636,9 @@ pub(crate) fn write_bytes_intrinsic(
     ) -> InterpResult<'tcx> {
         let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;

-        let dst = self.read_pointer(&dst)?;
-        let byte = self.read_scalar(&byte)?.to_u8()?;
-        let count = self.read_target_usize(&count)?;
+        let dst = self.read_pointer(dst)?;
+        let byte = self.read_scalar(byte)?.to_u8()?;
+        let count = self.read_target_usize(count)?;

         // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
@@ -24,7 +24,7 @@
 pub use self::intern::{intern_const_alloc_recursive, InternKind};
 pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
 pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
 pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
 pub use self::projection::Projectable;
 pub use self::terminator::FnArg;
@@ -180,20 +180,6 @@ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
     }
 }

-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
 impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
     #[inline(always)]
     fn from(val: ImmTy<'tcx, Prov>) -> Self {
@@ -201,20 +187,6 @@ fn from(val: ImmTy<'tcx, Prov>) -> Self {
     }
 }

-impl<'tcx, Prov: Provenance> From<&'_ ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(val: &ImmTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
-    }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(val: &mut ImmTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
-    }
-}
-
 impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
@@ -341,7 +313,7 @@ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
         _ecx: &InterpCx<'mir, 'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
+        Ok(self.clone().into())
     }
 }

@@ -400,6 +372,31 @@ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
     }
 }

+pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        self.as_mplace_or_imm()
+    }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        Left(self.clone())
+    }
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        Right(self.clone())
+    }
+}
+
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
     /// Returns `None` if the layout does not permit loading this as a value.
@@ -472,7 +469,7 @@ fn read_immediate_from_mplace_raw(
     /// ConstProp needs it, though.
     pub fn read_immediate_raw(
         &self,
-        src: &OpTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
         Ok(match src.as_mplace_or_imm() {
             Left(ref mplace) => {
@@ -492,14 +489,18 @@ pub fn read_immediate_raw(
     #[inline(always)]
     pub fn read_immediate(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         if !matches!(
-            op.layout.abi,
+            op.layout().abi,
             Abi::Scalar(abi::Scalar::Initialized { .. })
                 | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
         ) {
-            span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
+            span_bug!(
+                self.cur_span(),
+                "primitive read not possible for type: {:?}",
+                op.layout().ty
+            );
         }
         let imm = self.read_immediate_raw(op)?.right().unwrap();
         if matches!(*imm, Immediate::Uninit) {
@@ -511,7 +512,7 @@ pub fn read_immediate(
     /// Read a scalar from a place
     pub fn read_scalar(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         Ok(self.read_immediate(op)?.to_scalar())
     }
@@ -522,16 +523,22 @@ pub fn read_scalar(
     /// Read a pointer from a place.
     pub fn read_pointer(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
         self.read_scalar(op)?.to_pointer(self)
     }
     /// Read a pointer-sized unsigned integer from a place.
-    pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+    pub fn read_target_usize(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, u64> {
         self.read_scalar(op)?.to_target_usize(self)
     }
     /// Read a pointer-sized signed integer from a place.
-    pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+    pub fn read_target_isize(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, i64> {
         self.read_scalar(op)?.to_target_isize(self)
     }

@@ -18,7 +18,7 @@
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
     ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
-    Pointer, Projectable, Provenance, Scalar,
+    Pointer, Projectable, Provenance, Readable, Scalar,
 };

 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -246,7 +246,7 @@ fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
         _ecx: &InterpCx<'mir, 'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
+        Ok(self.clone().into())
     }
 }

@@ -442,7 +442,7 @@ pub fn ref_to_mplace(
     #[instrument(skip(self), level = "debug")]
     pub fn deref_operand(
         &self,
-        src: &OpTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
         let val = self.read_immediate(src)?;
         trace!("deref to {} on {:?}", val.layout.ty, *val);
@@ -766,7 +766,7 @@ pub fn write_uninit(
     #[instrument(skip(self), level = "debug")]
     pub fn copy_op(
         &mut self,
-        src: &OpTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
     ) -> InterpResult<'tcx> {
@@ -787,19 +787,19 @@ pub fn copy_op(
     #[instrument(skip(self), level = "debug")]
     fn copy_op_no_validate(
         &mut self,
-        src: &OpTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
     ) -> InterpResult<'tcx> {
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
         let layout_compat =
-            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout());
+            mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
         if !allow_transmute && !layout_compat {
             span_bug!(
                 self.cur_span(),
                 "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
-                src.layout.ty,
+                src.layout().ty,
                 dest.layout().ty,
             );
         }
@@ -813,13 +813,13 @@ fn copy_op_no_validate(
             // actually sized, due to a trivially false where-clause
             // predicate like `where Self: Sized` with `Self = dyn Trait`.
             // See #102553 for an example of such a predicate.
-            if src.layout.is_unsized() {
-                throw_inval!(SizeOfUnsizedType(src.layout.ty));
+            if src.layout().is_unsized() {
+                throw_inval!(SizeOfUnsizedType(src.layout().ty));
             }
             if dest.layout().is_unsized() {
                 throw_inval!(SizeOfUnsizedType(dest.layout().ty));
             }
-            assert_eq!(src.layout.size, dest.layout().size);
+            assert_eq!(src.layout().size, dest.layout().size);
             // Yay, we got a value that we can write directly.
             return if layout_compat {
                 self.write_immediate_no_validate(*src_val, dest)
@@ -831,7 +831,7 @@ fn copy_op_no_validate(
             let dest_mem = dest.force_mplace(self)?;
             self.write_immediate_to_mplace_no_validate(
                 *src_val,
-                src.layout,
+                src.layout(),
                 dest_mem.align,
                 *dest_mem,
             )
@@ -494,7 +494,7 @@ fn check_assertion(
         trace!("assertion on {:?} should be {:?}", value, expected);

         let expected = Scalar::from_bool(expected);
-        let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(&value))?;
+        let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(value))?;

         if expected != value_const {
             // Poison all places this operand references so that further code
@@ -664,7 +664,7 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location
             }
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(&discr, location)
-                    && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(&value))
+                    && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(value))
                     && let Ok(constant) = value_const.try_to_int()
                    && let Ok(constant) = constant.to_bits(constant.size())
                 {
@@ -472,7 +472,7 @@ fn read_scalar_atomic(
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
         // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
         // Only metadata on the location itself is used.
-        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
+        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
         this.validate_overlapping_atomic(place)?;
         this.buffered_atomic_read(place, atomic, scalar, || {
             this.validate_atomic_load(place, atomic)
@@ -513,7 +513,7 @@ fn atomic_op_immediate(
         this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
-        let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
+        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

         // Atomics wrap around on overflow.
         let val = this.binary_op(op, &old, rhs)?;
@@ -538,7 +538,7 @@ fn atomic_exchange_scalar(
         this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
-        let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
+        let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
         this.allow_data_races_mut(|this| this.write_scalar(new, place))?;

         this.validate_atomic_rmw(place, atomic)?;
@@ -560,7 +560,7 @@ fn atomic_min_max_scalar(
         this.atomic_access_check(place)?;

         this.validate_overlapping_atomic(place)?;
-        let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
+        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
         let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;

         let new_val = if min {
@@ -603,7 +603,7 @@ fn atomic_compare_exchange_scalar(
         // to read with the failure ordering and if successful then try again with the success
         // read ordering and write in the success case.
         // Read as immediate for the sake of `binary_op()`
-        let old = this.allow_data_races_mut(|this| this.read_immediate(&(place.into())))?;
+        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
         // `binary_op` will bail if either of them is not a scalar.
         let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
         // If the operation would succeed, but is "weak", fail some portion
@@ -246,7 +246,7 @@ fn on_main_stack_empty<'tcx>(
                     this.machine.main_fn_ret_place.unwrap().ptr,
                     this.machine.layouts.isize,
                 );
-                let exit_code = this.read_target_isize(&ret_place.into())?;
+                let exit_code = this.read_target_isize(&ret_place)?;
                 // Need to call this ourselves since we are not going to return to the scheduler
                 // loop, and we want the main thread TLS to not show up as memory leaks.
                 this.terminate_active_thread()?;
@@ -166,7 +166,7 @@ fn eval_path_scalar(&self, path: &[&str]) -> Scalar<Provenance> {
         let const_val = this.eval_global(cid, None).unwrap_or_else(|err| {
             panic!("failed to evaluate required Rust item: {path:?}\n{err:?}")
         });
-        this.read_scalar(&const_val.into())
+        this.read_scalar(&const_val)
             .unwrap_or_else(|err| panic!("failed to read required Rust item: {path:?}\n{err:?}"))
     }

@@ -623,7 +623,7 @@ fn set_last_error(&mut self, scalar: Scalar<Provenance>) -> InterpResult<'tcx> {
     fn get_last_error(&mut self) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_mut();
         let errno_place = this.last_error_place()?;
-        this.read_scalar(&errno_place.into())
+        this.read_scalar(&errno_place)
     }

     /// This function tries to produce the most similar OS error from the `std::io::ErrorKind`
@@ -772,7 +772,7 @@ fn read_scalar_at_offset(
     ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_ref();
         let value_place = this.deref_operand_and_offset(op, offset, base_layout, value_layout)?;
-        this.read_scalar(&value_place.into())
+        this.read_scalar(&value_place)
     }

     fn write_scalar_at_offset(
@@ -797,10 +797,10 @@ fn read_timespec(
     ) -> InterpResult<'tcx, Option<Duration>> {
         let this = self.eval_context_mut();
         let seconds_place = this.project_field(tp, 0)?;
-        let seconds_scalar = this.read_scalar(&seconds_place.into())?;
+        let seconds_scalar = this.read_scalar(&seconds_place)?;
         let seconds = seconds_scalar.to_target_isize(this)?;
         let nanoseconds_place = this.project_field(tp, 1)?;
-        let nanoseconds_scalar = this.read_scalar(&nanoseconds_place.into())?;
+        let nanoseconds_scalar = this.read_scalar(&nanoseconds_place)?;
         let nanoseconds = nanoseconds_scalar.to_target_isize(this)?;

         Ok(try {
@@ -88,7 +88,7 @@ pub(crate) fn cleanup<'mir>(
     }
     // Deallocate environ var list.
     let environ = ecx.machine.env_vars.environ.unwrap();
-    let old_vars_ptr = ecx.read_pointer(&environ.into())?;
+    let old_vars_ptr = ecx.read_pointer(&environ)?;
     ecx.deallocate_ptr(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
     Ok(())
 }
@@ -432,7 +432,7 @@ fn update_environ(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         // Deallocate the old environ list, if any.
         if let Some(environ) = this.machine.env_vars.environ {
-            let old_vars_ptr = this.read_pointer(&environ.into())?;
+            let old_vars_ptr = this.read_pointer(&environ)?;
             this.deallocate_ptr(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
         } else {
             // No `environ` allocated yet, let's do that.
@@ -97,7 +97,7 @@ fn emulate_intrinsic_by_name(
             "volatile_load" => {
                 let [place] = check_arg_count(args)?;
                 let place = this.deref_operand(place)?;
-                this.copy_op(&place.into(), dest, /*allow_transmute*/ false)?;
+                this.copy_op(&place, dest, /*allow_transmute*/ false)?;
             }
             "volatile_store" => {
                 let [place, dest] = check_arg_count(args)?;
@@ -57,7 +57,7 @@ enum Op {
                 };

                 for i in 0..dest_len {
-                    let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
+                    let op = this.read_immediate(&this.project_index(&op, i)?)?;
                     let dest = this.project_index(&dest, i)?;
                     let val = match which {
                         Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar(),
@@ -172,8 +172,8 @@ enum Op {
                 };

                 for i in 0..dest_len {
-                    let left = this.read_immediate(&this.project_index(&left, i)?.into())?;
-                    let right = this.read_immediate(&this.project_index(&right, i)?.into())?;
+                    let left = this.read_immediate(&this.project_index(&left, i)?)?;
+                    let right = this.read_immediate(&this.project_index(&right, i)?)?;
                     let dest = this.project_index(&dest, i)?;
                     let val = match which {
                         Op::MirOp(mir_op) => {
@@ -232,9 +232,9 @@ enum Op {
                 assert_eq!(dest_len, c_len);

                 for i in 0..dest_len {
-                    let a = this.read_scalar(&this.project_index(&a, i)?.into())?;
-                    let b = this.read_scalar(&this.project_index(&b, i)?.into())?;
-                    let c = this.read_scalar(&this.project_index(&c, i)?.into())?;
+                    let a = this.read_scalar(&this.project_index(&a, i)?)?;
+                    let b = this.read_scalar(&this.project_index(&b, i)?)?;
+                    let c = this.read_scalar(&this.project_index(&c, i)?)?;
                     let dest = this.project_index(&dest, i)?;

                     // Works for f32 and f64.
@@ -295,13 +295,13 @@ enum Op {
                 };

                 // Initialize with first lane, then proceed with the rest.
-                let mut res = this.read_immediate(&this.project_index(&op, 0)?.into())?;
+                let mut res = this.read_immediate(&this.project_index(&op, 0)?)?;
                 if matches!(which, Op::MirOpBool(_)) {
                     // Convert to `bool` scalar.
                     res = imm_from_bool(simd_element_to_bool(res)?);
                 }
                 for i in 1..op_len {
-                    let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
+                    let op = this.read_immediate(&this.project_index(&op, i)?)?;
                     res = match which {
                         Op::MirOp(mir_op) => {
                             this.binary_op(mir_op, &res, &op)?
@@ -355,7 +355,7 @@ enum Op {

                 let mut res = init;
                 for i in 0..op_len {
-                    let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
+                    let op = this.read_immediate(&this.project_index(&op, i)?)?;
                     res = this.binary_op(mir_op, &res, &op)?;
                 }
                 this.write_immediate(*res, dest)?;
@@ -372,9 +372,9 @@ enum Op {
                 assert_eq!(dest_len, no_len);

                 for i in 0..dest_len {
-                    let mask = this.read_immediate(&this.project_index(&mask, i)?.into())?;
-                    let yes = this.read_immediate(&this.project_index(&yes, i)?.into())?;
-                    let no = this.read_immediate(&this.project_index(&no, i)?.into())?;
+                    let mask = this.read_immediate(&this.project_index(&mask, i)?)?;
+                    let yes = this.read_immediate(&this.project_index(&yes, i)?)?;
+                    let no = this.read_immediate(&this.project_index(&no, i)?)?;
                     let dest = this.project_index(&dest, i)?;

                     let val = if simd_element_to_bool(mask)? { yes } else { no };
@@ -403,8 +403,8 @@ enum Op {
                         & 1u64
                             .checked_shl(simd_bitmask_index(i, dest_len, this.data_layout().endian))
                             .unwrap();
-                    let yes = this.read_immediate(&this.project_index(&yes, i.into())?.into())?;
-                    let no = this.read_immediate(&this.project_index(&no, i.into())?.into())?;
+                    let yes = this.read_immediate(&this.project_index(&yes, i.into())?)?;
+                    let no = this.read_immediate(&this.project_index(&no, i.into())?)?;
                     let dest = this.project_index(&dest, i.into())?;

                     let val = if mask != 0 { yes } else { no };
@@ -435,7 +435,7 @@ enum Op {
                 let from_exposed_cast = intrinsic_name == "from_exposed_addr";

                 for i in 0..dest_len {
-                    let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
+                    let op = this.read_immediate(&this.project_index(&op, i)?)?;
                     let dest = this.project_index(&dest, i)?;

                     let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
@@ -503,10 +503,10 @@ enum Op {
                     let dest = this.project_index(&dest, i)?;

                     let val = if src_index < left_len {
-                        this.read_immediate(&this.project_index(&left, src_index)?.into())?
+                        this.read_immediate(&this.project_index(&left, src_index)?)?
                     } else if src_index < left_len.checked_add(right_len).unwrap() {
                         let right_idx = src_index.checked_sub(left_len).unwrap();
-                        this.read_immediate(&this.project_index(&right, right_idx)?.into())?
+                        this.read_immediate(&this.project_index(&right, right_idx)?)?
                     } else {
                         span_bug!(
                             this.cur_span(),
@@ -528,14 +528,14 @@ enum Op {
                 assert_eq!(dest_len, mask_len);

                 for i in 0..dest_len {
-                    let passthru = this.read_immediate(&this.project_index(&passthru, i)?.into())?;
-                    let ptr = this.read_immediate(&this.project_index(&ptrs, i)?.into())?;
-                    let mask = this.read_immediate(&this.project_index(&mask, i)?.into())?;
+                    let passthru = this.read_immediate(&this.project_index(&passthru, i)?)?;
+                    let ptr = this.read_immediate(&this.project_index(&ptrs, i)?)?;
+                    let mask = this.read_immediate(&this.project_index(&mask, i)?)?;
                     let dest = this.project_index(&dest, i)?;

                     let val = if simd_element_to_bool(mask)? {
-                        let place = this.deref_operand(&ptr.into())?;
-                        this.read_immediate(&place.into())?
+                        let place = this.deref_operand(&ptr)?;
+                        this.read_immediate(&place)?
                     } else {
                         passthru
                     };
@@ -552,12 +552,12 @@ enum Op {
                 assert_eq!(ptrs_len, mask_len);

                 for i in 0..ptrs_len {
-                    let value = this.read_immediate(&this.project_index(&value, i)?.into())?;
-                    let ptr = this.read_immediate(&this.project_index(&ptrs, i)?.into())?;
-                    let mask = this.read_immediate(&this.project_index(&mask, i)?.into())?;
+                    let value = this.read_immediate(&this.project_index(&value, i)?)?;
+                    let ptr = this.read_immediate(&this.project_index(&ptrs, i)?)?;
+                    let mask = this.read_immediate(&this.project_index(&mask, i)?)?;

                     if simd_element_to_bool(mask)? {
-                        let place = this.deref_operand(&ptr.into())?;
+                        let place = this.deref_operand(&ptr)?;
                         this.write_immediate(*value, &place)?;
                     }
                 }
@@ -578,7 +578,7 @@ enum Op {

                 let mut res = 0u64;
                 for i in 0..op_len {
-                    let op = this.read_immediate(&this.project_index(&op, i.into())?.into())?;
+                    let op = this.read_immediate(&this.project_index(&op, i.into())?)?;
                     if simd_element_to_bool(op)? {
                         res |= 1u64
                             .checked_shl(simd_bitmask_index(i, op_len, this.data_layout().endian))
@@ -74,9 +74,9 @@ fn epoll_ctl(
         let event = this.deref_operand_as(event, this.libc_ty_layout("epoll_event"))?;

         let events = this.project_field(&event, 0)?;
-        let events = this.read_scalar(&events.into())?.to_u32()?;
+        let events = this.read_scalar(&events)?.to_u32()?;
         let data = this.project_field(&event, 1)?;
-        let data = this.read_scalar(&data.into())?;
+        let data = this.read_scalar(&data)?;
         let event = EpollEvent { events, data };

         if let Some(epfd) = this.machine.file_handler.handles.get_mut(&epfd) {
@@ -133,7 +133,7 @@ fn emulate_foreign_item_by_name(
                 let (written, size_needed) = this.write_path_to_c_str(
                     &path,
                     buf_ptr,
-                    this.read_scalar(&bufsize.into())?.to_u32()?.into(),
+                    this.read_scalar(&bufsize)?.to_u32()?.into(),
                 )?;

                 if written {
@@ -323,7 +323,7 @@ fn WaitOnAddress(
         let layout = this.machine.layouts.uint(size).unwrap();
         let futex_val = this
             .read_scalar_atomic(&MPlaceTy::from_aligned_ptr(ptr, layout), AtomicReadOrd::Relaxed)?;
-        let compare_val = this.read_scalar(&MPlaceTy::from_aligned_ptr(compare, layout).into())?;
+        let compare_val = this.read_scalar(&MPlaceTy::from_aligned_ptr(compare, layout))?;

         if futex_val == compare_val {
             // If the values are the same, we have to block.