use rustc::mir;
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty;

use rustc::mir::interpret::{EvalResult, Scalar, ScalarMaybeUndef, PointerArithmetic};
use rustc_mir::interpret::{
    PlaceTy, EvalContext, OpTy, Value
};

use super::{FalibleScalarExt, OperatorEvalContextExt};

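// Extends the interpreter with miri-specific intrinsic handling: anything that
// `emulate_intrinsic` (the engine's shared implementation) does not take care
// of is matched by name below.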
pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: PlaceTy<'tcx>,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: PlaceTy<'tcx>,
    ) -> EvalResult<'tcx> {
        if self.emulate_intrinsic(instance, args, dest)? {
            return Ok(());
        }

        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
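            // The `*_with_overflow` intrinsics return a `(result, overflowed)`
            // pair, like `overflowing_add` in libcore: e.g.
            // `255u8.overflowing_add(1)` is `(0, true)`.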
            "add_with_overflow" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_with_overflow(
                    mir::BinOp::Add,
                    l,
                    r,
                    dest,
                )?
            }

            "sub_with_overflow" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_with_overflow(
                    mir::BinOp::Sub,
                    l,
                    r,
                    dest,
                )?
            }

            "mul_with_overflow" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_with_overflow(
                    mir::BinOp::Mul,
                    l,
                    r,
                    dest,
                )?
            }

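            // `arith_offset` is the wrapping pointer offset behind
            // `<*const T>::wrapping_offset`: the byte offset is
            // `offset * size_of::<T>()`, and both the multiplication and the
            // pointer arithmetic may wrap without being UB.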
            "arith_offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
                self.write_scalar(result_ptr, dest)?;
            }

            "assume" => {
                let cond = self.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }

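            // The interpreter is single-threaded, so all atomic orderings
            // collapse to plain loads and stores here; the accesses must still
            // be scalar-sized, mirroring what real atomics can operate on.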
            "atomic_load" |
            "atomic_load_relaxed" |
            "atomic_load_acq" |
            "volatile_load" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
                self.write_scalar(val, dest)?;
            }

            "atomic_store" |
            "atomic_store_relaxed" |
            "atomic_store_rel" |
            "volatile_store" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
                self.write_scalar(val, ptr.into())?;
            }

            "atomic_fence_acq" => {
                // we are inherently single-threaded and single-cored, this is a nop
            }

            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let new = self.read_scalar(args[1])?;
                let old = self.read_scalar(ptr.into())?;
                self.write_scalar(old, dest)?; // old value is returned
                self.write_scalar(new, ptr.into())?;
            }

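            // Compare-and-exchange, as in `AtomicUsize::compare_exchange`: the
            // destination receives `(old_value, success_flag)`, and the store
            // happens only if the old value matched the expected one.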
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op()`
                let new = self.read_scalar(args[2])?;
                let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op()`
                // binary_op will bail if either of them is not a scalar
                let (eq, _) = self.binary_op(mir::BinOp::Eq, old, expect_old)?;
                let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
                self.write_value(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    self.write_scalar(new, ptr.into())?;
                }
            }

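            // The read-modify-write "fetch" family, e.g. `atomic_xadd` is what
            // `AtomicUsize::fetch_add` lowers to; like the `fetch_*` methods it
            // returns the value from *before* the update.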
            "atomic_or" |
            "atomic_or_acq" |
            "atomic_or_rel" |
            "atomic_or_acqrel" |
            "atomic_or_relaxed" |
            "atomic_xor" |
            "atomic_xor_acq" |
            "atomic_xor_rel" |
            "atomic_xor_acqrel" |
            "atomic_xor_relaxed" |
            "atomic_and" |
            "atomic_and_acq" |
            "atomic_and_rel" |
            "atomic_and_acqrel" |
            "atomic_and_relaxed" |
            "atomic_xadd" |
            "atomic_xadd_acq" |
            "atomic_xadd_rel" |
            "atomic_xadd_acqrel" |
            "atomic_xadd_relaxed" |
            "atomic_xsub" |
            "atomic_xsub_acq" |
            "atomic_xsub_rel" |
            "atomic_xsub_acqrel" |
            "atomic_xsub_relaxed" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let rhs = self.read_value(args[1])?;
                let old = self.read_value(ptr.into())?;
                self.write_value(*old, dest)?; // old value is returned
                let op = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => mir::BinOp::BitOr,
                    "xor" => mir::BinOp::BitXor,
                    "and" => mir::BinOp::BitAnd,
                    "xadd" => mir::BinOp::Add,
                    "xsub" => mir::BinOp::Sub,
                    _ => bug!(),
                };
                // FIXME: what do atomics do on overflow?
                let (val, _) = self.binary_op(op, old, rhs)?;
                self.write_scalar(val, ptr.into())?;
            }

            "breakpoint" => unimplemented!(), // halt miri

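            // `copy` is `ptr::copy` (memmove-style, overlap allowed) and
            // `copy_nonoverlapping` is `ptr::copy_nonoverlapping` (memcpy-style);
            // the final argument to `memory.copy` selects the overlap check.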
            "copy" |
            "copy_nonoverlapping" => {
                let elem_ty = substs.type_at(0);
                let elem_layout = self.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                if count * elem_size != 0 {
                    // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
                    // Also see the write_bytes intrinsic.
                    let elem_align = elem_layout.align;
                    let src = self.read_scalar(args[0])?.not_undef()?;
                    let dest = self.read_scalar(args[1])?.not_undef()?;
                    self.memory.copy(
                        src,
                        elem_align,
                        dest,
                        elem_align,
                        Size::from_bytes(count * elem_size),
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            }

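            // Reads an enum's discriminant as an opaque integer (roughly what
            // `mem::discriminant` builds on); the value is re-encoded at the
            // destination's size.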
            "discriminant_value" => {
                let place = self.ref_to_mplace(self.read_value(args[0])?)?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
            }

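            // Unary float intrinsics are evaluated with the host's `f32`/`f64`
            // methods (`sin`, `sqrt`, ...), so results follow the host libm
            // rather than any particular target implementation.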
            "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let f = match intrinsic_name {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(Scalar::from_f32(f), dest)?;
            }

            "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let f = match intrinsic_name {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(Scalar::from_f64(f), dest)?;
            }

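            // The `*_fast` intrinsics only add fast-math flags for the
            // optimizer; under the interpreter they are evaluated as the
            // ordinary IEEE operations.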
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                let result = self.binary_op(op, a, b)?;
                self.write_scalar(result.0, dest)?;
            }

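            // Exact division is UB unless it is, in fact, exact: e.g.
            // `exact_div(6, 3)` is fine, while `exact_div(7, 3)` or division by
            // zero is reported as an error below.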
            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior where
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                // check x % y != 0
                if !self.binary_op(mir::BinOp::Rem, a, b)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                let result = self.binary_op(mir::BinOp::Div, a, b)?;
                self.write_scalar(result.0, dest)?;
            },

            "likely" | "unlikely" | "forget" => {}

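            // `init` backs `mem::zeroed`: fill the destination with 0 bytes,
            // with a fast path for scalar and scalar-pair layouts so that no
            // temporary allocation is forced.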
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(&self));
                            self.write_value(Value::Scalar(x.into()), dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(&self));
                            let y = Scalar::from_int(0, s2.value.size(&self));
                            self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.extra.is_none());
                            self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
                        }
                    }
                }
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                self.copy_op(args[1], ptr.into())?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = ty::ParamEnv::reveal_all();
                let needs_drop = ty.needs_drop(self.tcx.tcx, env);
                self.write_scalar(
                    Scalar::from_bool(needs_drop),
                    dest,
                )?;
            }

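            // Unlike `arith_offset` above, `offset` backs `<*const T>::offset`
            // and requires the result to stay within the same allocation;
            // `pointer_offset_inbounds` performs that check.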
            "offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                self.write_scalar(result_ptr, dest)?;
            }

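            // Despite the name, the `overflowing_*` intrinsics are the
            // *wrapping* operations that `wrapping_add` and friends lower to:
            // the result wraps around and the overflow flag is discarded.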
            "overflowing_sub" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_ignore_overflow(
                    mir::BinOp::Sub,
                    l,
                    r,
                    dest,
                )?;
            }

            "overflowing_mul" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_ignore_overflow(
                    mir::BinOp::Mul,
                    l,
                    r,
                    dest,
                )?;
            }

            "overflowing_add" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_ignore_overflow(
                    mir::BinOp::Add,
                    l,
                    r,
                    dest,
                )?;
            }

            "powf32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let f2 = self.read_scalar(args[1])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powf(f2)),
                    dest,
                )?;
            }

            "powf64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let f2 = self.read_scalar(args[1])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(f.powf(f2)),
                    dest,
                )?;
            }

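            // A true fused multiply-add rounds only once; evaluating
            // `a * b + c` rounds after the multiply and again after the add,
            // so the `fma*` results below are only an approximation.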
            "fmaf32" => {
                let a = self.read_scalar(args[0])?.to_f32()?;
                let b = self.read_scalar(args[1])?.to_f32()?;
                let c = self.read_scalar(args[2])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(a * b + c),
                    dest,
                )?;
            }

            "fmaf64" => {
                let a = self.read_scalar(args[0])?.to_f64()?;
                let b = self.read_scalar(args[1])?.to_f64()?;
                let c = self.read_scalar(args[2])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(a * b + c),
                    dest,
                )?;
            }

            "powif32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powi(i)),
                    dest,
                )?;
            }

            "powif64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f64(f.powi(i)),
                    dest,
                )?;
            }

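            // `size_of_val`/`align_of_val` must look at the place rather than
            // the static type: for unsized tails (slices, trait objects) size
            // and alignment are only known from the pointer's metadata.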
            "size_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (size, _) = self.size_and_align_of_mplace(mplace)?;
                let ptr_size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (_, align) = self.size_and_align_of_mplace(mplace)?;
                let ptr_size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest,
                )?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(value, dest)?;
            }

            "transmute" => {
                // Go through an allocation, to make sure the completely different layouts
                // do not pose a problem. (When the user transmutes through a union,
                // there will not be a layout mismatch.)
                let dest = self.force_allocation(dest)?;
                self.copy_op(args[0], dest.into())?;
            }

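            // The `unchecked_*` intrinsics promise the operation cannot fail
            // (shift amount in range, divisor nonzero); the interpreter checks
            // anyway and reports the UB, e.g. `unchecked_shl(x, 8)` on a `u8`
            // trips the `rval >= bits` check below.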
            "unchecked_shl" => {
                let bits = dest.layout.size.bytes() as u128 * 8;
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shl", rval),
                    ));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Shl,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_shr" => {
                let bits = dest.layout.size.bytes() as u128 * 8;
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shr", rval),
                    ));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Shr,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_div" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Div,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_rem" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Rem,
                    l,
                    r,
                    dest,
                )?;
            }

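            // `uninit` backs `mem::uninitialized`: mark the destination as
            // undef bytes, again with a fast path for scalar and scalar-pair
            // layouts.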
            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation of undef bytes and then copy that over.
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            self.write_value(Value::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            self.write_value(Value::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.extra.is_none());
                            self.memory.mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
                        }
                    }
                }
            }

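            // `write_bytes` is `ptr::write_bytes`: fill `count * size_of::<T>()`
            // bytes with `val_byte`; e.g. `ptr::write_bytes(p, 0u8, 1)` zeroes
            // a single element.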
            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.read_scalar(args[1])?.to_u8()?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                if count > 0 {
                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                    self.memory.check_align(ptr, ty_layout.align)?;
                    self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
                }
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        Ok(())
    }
}