interpret: more consistently use ImmTy in operators and casts

This commit is contained in:
Ralf Jung 2023-09-20 21:49:30 +02:00
parent 4f226925ce
commit da08a3f40c
21 changed files with 200 additions and 189 deletions

View File

@ -3,7 +3,7 @@
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::INVALID_ALIGNMENT;
use std::borrow::Borrow;
use std::hash::Hash;
@ -596,7 +596,7 @@ fn binary_ptr_op(
_bin_op: mir::BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
}

View File

@ -34,31 +34,31 @@ pub fn cast(
CastKind::PointerExposeAddress => {
let src = self.read_immediate(src)?;
let res = self.pointer_expose_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
self.write_immediate(*res, dest)?;
}
CastKind::PointerFromExposedAddress => {
let src = self.read_immediate(src)?;
let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
self.write_immediate(*res, dest)?;
}
CastKind::IntToInt | CastKind::IntToFloat => {
let src = self.read_immediate(src)?;
let res = self.int_to_int_or_float(&src, cast_ty)?;
self.write_immediate(res, dest)?;
self.write_immediate(*res, dest)?;
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let src = self.read_immediate(src)?;
let res = self.float_to_float_or_int(&src, cast_ty)?;
self.write_immediate(res, dest)?;
self.write_immediate(*res, dest)?;
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.read_immediate(src)?;
let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?;
self.write_immediate(*res, dest)?;
}
CastKind::PointerCoercion(
@ -165,11 +165,15 @@ pub fn int_to_int_or_float(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into())
let layout = self.layout_of(cast_ty)?;
Ok(ImmTy::from_scalar(
self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?,
layout,
))
}
/// Handles 'FloatToFloat' and 'FloatToInt' casts.
@ -177,21 +181,19 @@ pub fn float_to_float_or_int(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
use rustc_type_ir::sty::TyKind::*;
match src.layout.ty.kind() {
let layout = self.layout_of(cast_ty)?;
let val = match src.layout.ty.kind() {
// Floating point
Float(FloatTy::F32) => {
return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
}
Float(FloatTy::F64) => {
return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
}
Float(FloatTy::F32) => self.cast_from_float(src.to_scalar().to_f32()?, cast_ty),
Float(FloatTy::F64) => self.cast_from_float(src.to_scalar().to_f64()?, cast_ty),
_ => {
bug!("Can't cast 'Float' type into {:?}", cast_ty);
}
}
};
Ok(ImmTy::from_scalar(val, layout))
}
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
@ -199,21 +201,21 @@ pub fn ptr_to_ptr(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_any_ptr());
assert!(cast_ty.is_unsafe_ptr());
// Handle casting any ptr to raw ptr (might be a fat ptr).
let dest_layout = self.layout_of(cast_ty)?;
if dest_layout.size == src.layout.size {
// Thin or fat pointer that just has the ptr kind of target type changed.
return Ok(**src);
return Ok(ImmTy::from_immediate(**src, dest_layout));
} else {
// Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.pointer_size());
assert_eq!(dest_layout.size, self.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr());
return match **src {
Immediate::ScalarPair(data, _) => Ok(data.into()),
Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, dest_layout)),
Immediate::Scalar(..) => span_bug!(
self.cur_span(),
"{:?} input to a fat-to-thin cast ({:?} -> {:?})",
@ -230,7 +232,7 @@ pub fn pointer_expose_address_cast(
&mut self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
assert!(cast_ty.is_integral());
@ -240,14 +242,15 @@ pub fn pointer_expose_address_cast(
Ok(ptr) => M::expose_ptr(self, ptr)?,
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
};
Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
let layout = self.layout_of(cast_ty)?;
Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_ty)?, layout))
}
pub fn pointer_from_exposed_address_cast(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral());
assert_matches!(cast_ty.kind(), ty::RawPtr(_));
@ -258,12 +261,13 @@ pub fn pointer_from_exposed_address_cast(
// Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(&self, addr)?;
Ok(Scalar::from_maybe_pointer(ptr, self).into())
let layout = self.layout_of(cast_ty)?;
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), layout))
}
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
/// type (basically everything with a scalar layout) to int/float/char types.
pub fn cast_from_int_like(
fn cast_from_int_like(
&self,
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
src_layout: TyAndLayout<'tcx>,

View File

@ -76,7 +76,7 @@ pub fn write_discriminant(
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);
let tag_val = self.binary_op(
let tag_val = self.wrapping_binary_op(
mir::BinOp::Add,
&variant_index_relative_val,
&niche_start_val,
@ -153,19 +153,18 @@ pub fn read_discriminant(
// Figure out which discriminant and variant this corresponds to.
let index = match *tag_encoding {
TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
let tag_bits = scalar
let tag_bits = tag_val
.to_scalar()
.try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
let discr_val =
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
let discr_bits = discr_val.assert_bits(discr_layout.size);
let discr_val = self.int_to_int_or_float(&tag_val, discr_layout.ty).unwrap();
let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *ty.kind() {
ty::Adt(adt, _) => {
@ -208,7 +207,7 @@ pub fn read_discriminant(
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.

View File

@ -307,7 +307,7 @@ pub fn emulate_intrinsic(
let dist = {
// Addresses are unsigned, so this is a `usize` computation. We have to do the
// overflow check separately anyway.
let (val, overflowed, _ty) = {
let (val, overflowed) = {
let a_offset = ImmTy::from_uint(a_offset, usize_layout);
let b_offset = ImmTy::from_uint(b_offset, usize_layout);
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
@ -324,7 +324,7 @@ pub fn emulate_intrinsic(
// The signed form of the intrinsic allows this. If we interpret the
// difference as isize, we'll get the proper signed difference. If that
// seems *positive*, they were more than isize::MAX apart.
let dist = val.to_target_isize(self)?;
let dist = val.to_scalar().to_target_isize(self)?;
if dist >= 0 {
throw_ub_custom!(
fluent::const_eval_offset_from_underflow,
@ -334,7 +334,7 @@ pub fn emulate_intrinsic(
dist
} else {
// b >= a
let dist = val.to_target_isize(self)?;
let dist = val.to_scalar().to_target_isize(self)?;
// If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart.
if dist < 0 {
@ -504,9 +504,9 @@ pub fn exact_div(
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
assert!(!overflow); // All overflow is UB, so this should never return on overflow.
if res.assert_bits(a.layout.size) != 0 {
if res.to_scalar().assert_bits(a.layout.size) != 0 {
throw_ub_custom!(
fluent::const_eval_exact_div_has_remainder,
a = format!("{a}"),
@ -524,7 +524,7 @@ pub fn saturating_arith(
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
let size = l.layout.size;
let num_bits = size.bits();
@ -556,7 +556,7 @@ pub fn saturating_arith(
}
}
} else {
val
val.to_scalar()
})
}

View File

@ -9,7 +9,7 @@
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;
@ -18,7 +18,7 @@
use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance, Scalar,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
};
/// Data returned by Machine::stack_pop,
@ -238,7 +238,7 @@ fn binary_ptr_op(
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Self::Provenance>,
right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
/// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked

View File

@ -8,7 +8,7 @@
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty};
use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
@ -188,6 +188,12 @@ pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
Self::from_scalar(Scalar::from_int(i, layout.size), layout)
}
#[inline]
pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
Self::from_scalar(Scalar::from_bool(b), layout)
}
#[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());

View File

@ -1,7 +1,7 @@
use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
@ -20,9 +20,9 @@ pub fn binop_with_overflow(
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
dest.layout.ty,
"type mismatch for result of {op:?}",
);
@ -30,7 +30,7 @@ pub fn binop_with_overflow(
if let Abi::ScalarPair(..) = dest.layout.abi {
// We can use the optimized path and avoid `place_field` (which might do
// `force_allocation`).
let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
self.write_immediate(pair, dest)?;
} else {
assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
@ -38,7 +38,7 @@ pub fn binop_with_overflow(
// do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here.
let val_field = self.project_field(dest, 0)?;
self.write_scalar(val, &val_field)?;
self.write_scalar(val.to_scalar(), &val_field)?;
let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
}
@ -54,9 +54,9 @@ pub fn binop_ignore_overflow(
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_scalar(val, dest)
let val = self.wrapping_binary_op(op, left, right)?;
assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_immediate(*val, dest)
}
}
@ -66,7 +66,7 @@ fn binary_char_op(
bin_op: mir::BinOp,
l: char,
r: char,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@ -78,7 +78,7 @@ fn binary_char_op(
Ge => l >= r,
_ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
};
(Scalar::from_bool(res), false, self.tcx.types.bool)
(ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_bool_op(
@ -86,7 +86,7 @@ fn binary_bool_op(
bin_op: mir::BinOp,
l: bool,
r: bool,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@ -101,33 +101,33 @@ fn binary_bool_op(
BitXor => l ^ r,
_ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
};
(Scalar::from_bool(res), false, self.tcx.types.bool)
(ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
ty: Ty<'tcx>,
layout: TyAndLayout<'tcx>,
l: F,
r: F,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let (val, ty) = match bin_op {
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
Add => ((l + r).value.into(), ty),
Sub => ((l - r).value.into(), ty),
Mul => ((l * r).value.into(), ty),
Div => ((l / r).value.into(), ty),
Rem => ((l % r).value.into(), ty),
let val = match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
Lt => ImmTy::from_bool(l < r, *self.tcx),
Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => ImmTy::from_bool(l >= r, *self.tcx),
Add => ImmTy::from_scalar((l + r).value.into(), layout),
Sub => ImmTy::from_scalar((l - r).value.into(), layout),
Mul => ImmTy::from_scalar((l * r).value.into(), layout),
Div => ImmTy::from_scalar((l / r).value.into(), layout),
Rem => ImmTy::from_scalar((l % r).value.into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
};
(val, false, ty)
(val, false)
}
fn binary_int_op(
@ -138,7 +138,7 @@ fn binary_int_op(
left_layout: TyAndLayout<'tcx>,
r: u128,
right_layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
let throw_ub_on_overflow = match bin_op {
@ -200,7 +200,7 @@ fn binary_int_op(
);
}
return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
// For the remaining ops, the types must be the same on both sides
@ -230,7 +230,7 @@ fn binary_int_op(
if let Some(op) = op {
let l = self.sign_extend(l, left_layout) as i128;
let r = self.sign_extend(r, right_layout) as i128;
return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
}
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
Div if r == 0 => throw_ub!(DivisionByZero),
@ -267,22 +267,22 @@ fn binary_int_op(
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
}
let (val, ty) = match bin_op {
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
let val = match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
Lt => ImmTy::from_bool(l < r, *self.tcx),
Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => ImmTy::from_bool(l >= r, *self.tcx),
BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
BitOr => ImmTy::from_uint(l | r, left_layout),
BitAnd => ImmTy::from_uint(l & r, left_layout),
BitXor => ImmTy::from_uint(l ^ r, left_layout),
Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
assert!(!left_layout.abi.is_signed());
@ -304,7 +304,7 @@ fn binary_int_op(
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
_ => span_bug!(
@ -317,7 +317,7 @@ fn binary_int_op(
),
};
Ok((val, false, ty))
Ok((val, false))
}
fn binary_ptr_op(
@ -325,7 +325,7 @@ fn binary_ptr_op(
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
match bin_op {
@ -336,7 +336,10 @@ fn binary_ptr_op(
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty))
Ok((
ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
false,
))
}
// Fall back to machine hook so Miri can support more pointer ops.
@ -344,14 +347,13 @@ fn binary_ptr_op(
}
}
/// Returns the result of the specified operation, whether it overflowed, and
/// the result type.
/// Returns the result of the specified operation, and whether it overflowed.
pub fn overflowing_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
trace!(
"Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
@ -376,15 +378,15 @@ pub fn overflowing_binary_op(
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
let ty = left.layout.ty;
let layout = left.layout;
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
FloatTy::F32 => {
self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
}
FloatTy::F64 => {
self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
}
})
}
@ -423,16 +425,15 @@ pub fn overflowing_binary_op(
}
}
/// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
#[inline]
pub fn binary_op(
pub fn wrapping_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(val)
}
/// Returns the result of the specified operation, whether it overflowed, and
@ -441,7 +442,7 @@ pub fn overflowing_unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::UnOp::*;
let layout = val.layout;
@ -455,7 +456,7 @@ pub fn overflowing_unary_op(
Not => !val,
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
};
Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
Ok((ImmTy::from_bool(res, *self.tcx), false))
}
ty::Float(fty) => {
let res = match (un_op, fty) {
@ -463,7 +464,7 @@ pub fn overflowing_unary_op(
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
};
Ok((res, false, layout.ty))
Ok((ImmTy::from_scalar(res, layout), false))
}
_ => {
assert!(layout.ty.is_integral());
@ -482,17 +483,18 @@ pub fn overflowing_unary_op(
(truncated, overflow || self.sign_extend(truncated, layout) != res)
}
};
Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
Ok((ImmTy::from_uint(res, layout), overflow))
}
}
}
pub fn unary_op(
#[inline]
pub fn wrapping_unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
Ok(val)
}
}

View File

@ -177,7 +177,7 @@ pub fn eval_rvalue_into_place(
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, &val)?;
let val = self.wrapping_unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?;
}

View File

@ -98,14 +98,12 @@ pub(super) fn eval_terminator(
for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.)
let res = self
.overflowing_binary_op(
let res = self.wrapping_binary_op(
mir::BinOp::Eq,
&discr,
&ImmTy::from_uint(const_int, discr.layout),
)?
.0;
if res.to_bool()? {
)?;
if res.to_scalar().to_bool()? {
target_block = target;
break;
}

View File

@ -210,7 +210,7 @@ fn binary_ptr_op(
_bin_op: BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
// We can't do this because aliasing of memory can differ between const eval and llvm
throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
}

View File

@ -322,7 +322,7 @@ fn report_assert_as_lint(&self, source_info: &SourceInfo, lint: AssertLint<impl
fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> {
if let (val, true) = self.use_ecx(location, |this| {
let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
let (_res, overflow) = this.ecx.overflowing_unary_op(op, &val)?;
Ok((val, overflow))
})? {
// `AssertKind` only has an `OverflowNeg` variant, so make sure that is
@ -390,7 +390,7 @@ fn check_binary_op(
if let (Some(l), Some(r)) = (l, r) {
// The remaining operators are handled through `overflowing_binary_op`.
if self.use_ecx(location, |this| {
let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, &l, &r)?;
let (_res, overflow) = this.ecx.overflowing_binary_op(op, &l, &r)?;
Ok(overflow)
})? {
let source_info = self.body().source_info(location);

View File

@ -238,7 +238,7 @@ fn handle_rvalue(
FlatSet::Elem(op) => self
.ecx
.int_to_int_or_float(&op, *ty)
.map_or(FlatSet::Top, |result| self.wrap_immediate(result)),
.map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top,
}
@ -248,7 +248,7 @@ fn handle_rvalue(
FlatSet::Elem(op) => self
.ecx
.float_to_float_or_int(&op, *ty)
.map_or(FlatSet::Top, |result| self.wrap_immediate(result)),
.map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top,
}
@ -268,7 +268,7 @@ fn handle_rvalue(
Rvalue::UnaryOp(op, operand) => match self.eval_operand(operand, state) {
FlatSet::Elem(value) => self
.ecx
.unary_op(*op, &value)
.wrapping_unary_op(*op, &value)
.map_or(FlatSet::Top, |val| self.wrap_immediate(*val)),
FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top,
@ -439,7 +439,9 @@ fn binary_op(
// Both sides are known, do the actual computation.
(FlatSet::Elem(left), FlatSet::Elem(right)) => {
match self.ecx.overflowing_binary_op(op, &left, &right) {
Ok((val, overflow, _)) => (FlatSet::Elem(val), FlatSet::Elem(overflow)),
Ok((val, overflow)) => {
(FlatSet::Elem(val.to_scalar()), FlatSet::Elem(overflow))
}
_ => (FlatSet::Top, FlatSet::Top),
}
}
@ -783,8 +785,8 @@ fn binary_ptr_op(
_bin_op: BinOp,
_left: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
_right: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
) -> interpret::InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)> {
throw_unsup!(Unsupported("".into()))
) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> {
crate::const_prop::throw_machine_stop_str!("can't do pointer arithmetic");
}
fn expose_ptr(

View File

@ -516,8 +516,8 @@ fn atomic_op_immediate(
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
// Atomics wrap around on overflow.
let val = this.binary_op(op, &old, rhs)?;
let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val };
let val = this.wrapping_binary_op(op, &old, rhs)?;
let val = if neg { this.wrapping_unary_op(mir::UnOp::Not, &val)? } else { val };
this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
this.validate_atomic_rmw(place, atomic)?;
@ -561,7 +561,7 @@ fn atomic_min_max_scalar(
this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;
let lt = this.wrapping_binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;
let new_val = if min {
if lt { &old } else { &rhs }
@ -605,7 +605,7 @@ fn atomic_compare_exchange_scalar(
// Read as immediate for the sake of `binary_op()`
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
// `binary_op` will bail if either of them is not a scalar.
let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
let eq = this.wrapping_binary_op(mir::BinOp::Eq, &old, expect_old)?;
// If the operation would succeed, but is "weak", fail some portion
// of the time, based on `success_rate`.
let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;

View File

@ -1015,13 +1015,13 @@ fn float_to_int_checked<F>(
f: F,
dest_ty: Ty<'tcx>,
round: rustc_apfloat::Round,
) -> Option<Scalar<Provenance>>
) -> Option<ImmTy<'tcx, Provenance>>
where
F: rustc_apfloat::Float + Into<Scalar<Provenance>>,
{
let this = self.eval_context_ref();
match dest_ty.kind() {
let val = match dest_ty.kind() {
// Unsigned
ty::Uint(t) => {
let size = Integer::from_uint_ty(this, *t).size();
@ -1033,11 +1033,11 @@ fn float_to_int_checked<F>(
) {
// Floating point value is NaN (flagged with INVALID_OP) or outside the range
// of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
None
return None
} else {
// Floating point value can be represented by the integer type after rounding.
// The INEXACT flag is ignored on purpose to allow rounding.
Some(Scalar::from_uint(res.value, size))
Scalar::from_uint(res.value, size)
}
}
// Signed
@ -1051,11 +1051,11 @@ fn float_to_int_checked<F>(
) {
// Floating point value is NaN (flagged with INVALID_OP) or outside the range
// of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
None
return None
} else {
// Floating point value can be represented by the integer type after rounding.
// The INEXACT flag is ignored on purpose to allow rounding.
Some(Scalar::from_int(res.value, size))
Scalar::from_int(res.value, size)
}
}
// Nothing else
@ -1064,7 +1064,8 @@ fn float_to_int_checked<F>(
this.cur_span(),
"attempted float-to-int conversion with non-int output type {dest_ty:?}"
),
}
};
Some(ImmTy::from_scalar(val, this.layout_of(dest_ty).unwrap()))
}
/// Returns an integer type that is twice wide as `ty`

View File

@ -998,7 +998,7 @@ fn binary_ptr_op(
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)> {
ecx.binary_ptr_op(bin_op, left, right)
}

View File

@ -1,6 +1,6 @@
use log::trace;
use rustc_middle::{mir, ty::Ty};
use rustc_middle::mir;
use rustc_target::abi::Size;
use crate::*;
@ -11,7 +11,7 @@ fn binary_ptr_op(
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)>;
) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)>;
}
impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
@ -20,7 +20,7 @@ fn binary_ptr_op(
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
@ -50,7 +50,7 @@ fn binary_ptr_op(
Ge => left >= right,
_ => bug!(),
};
(Scalar::from_bool(res), false, self.tcx.types.bool)
(ImmTy::from_bool(res, *self.tcx), false)
}
// Some more operations are possible with atomics.
@ -65,12 +65,12 @@ fn binary_ptr_op(
right.to_scalar().to_target_usize(self)?,
self.machine.layouts.usize,
);
let (result, overflowing, _ty) =
let (result, overflowing) =
self.overflowing_binary_op(bin_op, &left, &right)?;
// Construct a new pointer with the provenance of `ptr` (the LHS).
let result_ptr =
Pointer::new(ptr.provenance, Size::from_bytes(result.to_target_usize(self)?));
(Scalar::from_maybe_pointer(result_ptr, self), overflowing, left.layout.ty)
Pointer::new(ptr.provenance, Size::from_bytes(result.to_scalar().to_target_usize(self)?));
(ImmTy::from_scalar(Scalar::from_maybe_pointer(result_ptr, self), left.layout), overflowing)
}
_ => span_bug!(self.cur_span(), "Invalid operator on pointers: {:?}", bin_op),

View File

@ -89,10 +89,9 @@ fn emulate_intrinsic_by_name(
let [left, right] = check_arg_count(args)?;
let left = this.read_immediate(left)?;
let right = this.read_immediate(right)?;
let (val, _overflowed, _ty) =
this.overflowing_binary_op(mir::BinOp::Eq, &left, &right)?;
let val = this.wrapping_binary_op(mir::BinOp::Eq, &left, &right)?;
// We're type punning a bool as an u8 here.
this.write_scalar(val, dest)?;
this.write_scalar(val.to_scalar(), dest)?;
}
"const_allocate" => {
// For now, for compatibility with the run-time implementation of this, we just return null.
@ -396,7 +395,7 @@ fn emulate_intrinsic_by_name(
),
};
this.write_scalar(res, dest)?;
this.write_immediate(*res, dest)?;
}
// Other

View File

@ -60,7 +60,7 @@ enum Op {
let op = this.read_immediate(&this.project_index(&op, i)?)?;
let dest = this.project_index(&dest, i)?;
let val = match which {
Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar(),
Op::MirOp(mir_op) => this.wrapping_unary_op(mir_op, &op)?.to_scalar(),
Op::Abs => {
// Works for f32 and f64.
let ty::Float(float_ty) = op.layout.ty.kind() else {
@ -177,7 +177,7 @@ enum Op {
let dest = this.project_index(&dest, i)?;
let val = match which {
Op::MirOp(mir_op) => {
let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
let (val, overflowed) = this.overflowing_binary_op(mir_op, &left, &right)?;
if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
// Shifts have extra UB as SIMD operations that the MIR binop does not have.
// See <https://github.com/rust-lang/rust/issues/91237>.
@ -188,13 +188,13 @@ enum Op {
}
if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
// Special handling for boolean-returning operations
assert_eq!(ty, this.tcx.types.bool);
let val = val.to_bool().unwrap();
assert_eq!(val.layout.ty, this.tcx.types.bool);
let val = val.to_scalar().to_bool().unwrap();
bool_to_simd_element(val, dest.layout.size)
} else {
assert_ne!(ty, this.tcx.types.bool);
assert_eq!(ty, dest.layout.ty);
val
assert_ne!(val.layout.ty, this.tcx.types.bool);
assert_eq!(val.layout.ty, dest.layout.ty);
val.to_scalar()
}
}
Op::SaturatingOp(mir_op) => {
@ -304,18 +304,18 @@ enum Op {
let op = this.read_immediate(&this.project_index(&op, i)?)?;
res = match which {
Op::MirOp(mir_op) => {
this.binary_op(mir_op, &res, &op)?
this.wrapping_binary_op(mir_op, &res, &op)?
}
Op::MirOpBool(mir_op) => {
let op = imm_from_bool(simd_element_to_bool(op)?);
this.binary_op(mir_op, &res, &op)?
this.wrapping_binary_op(mir_op, &res, &op)?
}
Op::Max => {
if matches!(res.layout.ty.kind(), ty::Float(_)) {
ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
} else {
// Just boring integers, so no NaNs to worry about
if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar().to_bool()? {
if this.wrapping_binary_op(BinOp::Ge, &res, &op)?.to_scalar().to_bool()? {
res
} else {
op
@ -327,7 +327,7 @@ enum Op {
ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
} else {
// Just boring integers, so no NaNs to worry about
if this.binary_op(BinOp::Le, &res, &op)?.to_scalar().to_bool()? {
if this.wrapping_binary_op(BinOp::Le, &res, &op)?.to_scalar().to_bool()? {
res
} else {
op
@ -356,7 +356,7 @@ enum Op {
let mut res = init;
for i in 0..op_len {
let op = this.read_immediate(&this.project_index(&op, i)?)?;
res = this.binary_op(mir_op, &res, &op)?;
res = this.wrapping_binary_op(mir_op, &res, &op)?;
}
this.write_immediate(*res, dest)?;
}
@ -487,7 +487,7 @@ enum Op {
to_ty = dest.layout.ty,
),
};
this.write_immediate(val, &dest)?;
this.write_immediate(*val, &dest)?;
}
}
"shuffle" => {

View File

@ -80,8 +80,8 @@ fn bin_op_float<'tcx, F: rustc_apfloat::Float>(
) -> InterpResult<'tcx, Scalar<Provenance>> {
match which {
FloatBinOp::Arith(which) => {
let (res, _overflow, _ty) = this.overflowing_binary_op(which, left, right)?;
Ok(res)
let res = this.wrapping_binary_op(which, left, right)?;
Ok(res.to_scalar())
}
FloatBinOp::Cmp(which) => {
let left = left.to_scalar().to_float::<F>()?;

View File

@ -175,10 +175,10 @@ fn emulate_x86_sse_intrinsic(
let res = this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE semantics.
Scalar::from_int(dest.layout.size.signed_int_min(), dest.layout.size)
ImmTy::from_int(dest.layout.size.signed_int_min(), dest.layout)
});
this.write_scalar(res, dest)?;
this.write_immediate(*res, dest)?;
}
// Used to implement the _mm_cvtsi32_ss and _mm_cvtsi64_ss functions.
// Converts `right` from i32/i64 to f32. Returns a SIMD vector with
@ -197,7 +197,7 @@ fn emulate_x86_sse_intrinsic(
let right = this.read_immediate(right)?;
let dest0 = this.project_index(&dest, 0)?;
let res0 = this.int_to_int_or_float(&right, dest0.layout.ty)?;
this.write_immediate(res0, &dest0)?;
this.write_immediate(*res0, &dest0)?;
for i in 1..dest_len {
this.copy_op(

View File

@ -62,30 +62,30 @@ fn emulate_x86_sse2_intrinsic(
let right = this.int_to_int_or_float(&right, twice_wide_ty)?;
// Calculate left + right + 1
let (added, _overflow, _ty) = this.overflowing_binary_op(
let added = this.wrapping_binary_op(
mir::BinOp::Add,
&ImmTy::from_immediate(left, twice_wide_layout),
&ImmTy::from_immediate(right, twice_wide_layout),
&left,
&right,
)?;
let (added, _overflow, _ty) = this.overflowing_binary_op(
let added = this.wrapping_binary_op(
mir::BinOp::Add,
&ImmTy::from_scalar(added, twice_wide_layout),
&added,
&ImmTy::from_uint(1u32, twice_wide_layout),
)?;
// Calculate (left + right + 1) / 2
let (divided, _overflow, _ty) = this.overflowing_binary_op(
let divided = this.wrapping_binary_op(
mir::BinOp::Div,
&ImmTy::from_scalar(added, twice_wide_layout),
&added,
&ImmTy::from_uint(2u32, twice_wide_layout),
)?;
// Narrow back to the original type
let res = this.int_to_int_or_float(
&ImmTy::from_scalar(divided, twice_wide_layout),
&divided,
dest.layout.ty,
)?;
this.write_immediate(res, &dest)?;
this.write_immediate(*res, &dest)?;
}
}
// Used to implement the _mm_mulhi_epi16 and _mm_mulhi_epu16 functions.
@ -112,24 +112,24 @@ fn emulate_x86_sse2_intrinsic(
let right = this.int_to_int_or_float(&right, twice_wide_ty)?;
// Multiply
let (multiplied, _overflow, _ty) = this.overflowing_binary_op(
let multiplied = this.wrapping_binary_op(
mir::BinOp::Mul,
&ImmTy::from_immediate(left, twice_wide_layout),
&ImmTy::from_immediate(right, twice_wide_layout),
&left,
&right,
)?;
// Keep the high half
let (high, _overflow, _ty) = this.overflowing_binary_op(
let high = this.wrapping_binary_op(
mir::BinOp::Shr,
&ImmTy::from_scalar(multiplied, twice_wide_layout),
&multiplied,
&ImmTy::from_uint(dest.layout.size.bits(), twice_wide_layout),
)?;
// Narrow back to the original type
let res = this.int_to_int_or_float(
&ImmTy::from_scalar(high, twice_wide_layout),
&high,
dest.layout.ty,
)?;
this.write_immediate(res, &dest)?;
this.write_immediate(*res, &dest)?;
}
}
// Used to implement the _mm_mul_epu32 function.
@ -394,9 +394,9 @@ enum ShiftOp {
let res =
this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE2 semantics.
Scalar::from_i32(i32::MIN)
ImmTy::from_int(i32::MIN, this.machine.layouts.i32)
});
this.write_scalar(res, &dest)?;
this.write_immediate(*res, &dest)?;
}
}
// Used to implement the _mm_packs_epi16 function.
@ -649,7 +649,7 @@ enum ShiftOp {
let dest = this.project_index(&dest, i)?;
let res = this.float_to_float_or_int(&op, dest.layout.ty)?;
this.write_immediate(res, &dest)?;
this.write_immediate(*res, &dest)?;
}
// For f32 -> f64, ignore the remaining
// For f64 -> f32, fill the remaining with zeros
@ -687,9 +687,9 @@ enum ShiftOp {
let res =
this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE2 semantics.
Scalar::from_i32(i32::MIN)
ImmTy::from_int(i32::MIN, this.machine.layouts.i32)
});
this.write_scalar(res, &dest)?;
this.write_immediate(*res, &dest)?;
}
// Fill the remaining with zeros
for i in op_len..dest_len {
@ -718,10 +718,10 @@ enum ShiftOp {
let res = this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE semantics.
Scalar::from_int(dest.layout.size.signed_int_min(), dest.layout.size)
ImmTy::from_int(dest.layout.size.signed_int_min(), dest.layout)
});
this.write_scalar(res, dest)?;
this.write_immediate(*res, dest)?;
}
// Used to implement the _mm_cvtsd_ss and _mm_cvtss_sd functions.
// Converts the first f64/f32 from `right` to f32/f64 and copies
@ -742,7 +742,7 @@ enum ShiftOp {
// `float_to_float_or_int` here will convert from f64 to f32 (cvtsd2ss) or
// from f32 to f64 (cvtss2sd).
let res0 = this.float_to_float_or_int(&right0, dest0.layout.ty)?;
this.write_immediate(res0, &dest0)?;
this.write_immediate(*res0, &dest0)?;
// Copy remaining from `left`
for i in 1..dest_len {