// rust/src/intrinsic.rs

use rustc::mir;
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty;

use rustc::mir::interpret::{EvalResult, PointerArithmetic};

use crate::{
    PlaceTy, OpTy, Immediate, Scalar, ScalarMaybeUndef, Borrow,
    ScalarExt, OperatorEvalContextExt
};

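/// Emulation of Miri-specific intrinsics: anything that the engine's own
/// `emulate_intrinsic` does not already handle is dispatched here.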
pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
    ) -> EvalResult<'tcx> {
        if self.emulate_intrinsic(instance, args, dest)? {
            return Ok(());
        }
        let tcx = &{self.tcx.tcx};
        let substs = instance.substs;

        // All these intrinsics take raw pointers, so if we access memory directly
        // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
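            // `arith_offset` is the wrapping pointer offset: the offset is scaled by the
            // pointee size and may wrap around; no inbounds check is performed (contrast
            // with the "offset" intrinsic below).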
"arith_offset" => {
2018-11-05 01:51:55 -06:00
let offset = self.read_scalar(args[1])?.to_isize(self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let pointee_ty = substs.type_at(0);
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.overflowing_mul(pointee_size).0;
2018-11-05 01:51:55 -06:00
let result_ptr = ptr.ptr_wrapping_signed_offset(offset, self);
self.write_scalar(result_ptr, dest)?;
2016-09-20 09:05:30 -05:00
}
"assume" => {
let cond = self.read_scalar(args[0])?.to_bool()?;
if !cond {
return err!(AssumptionNotHeld);
}
2016-09-20 09:05:30 -05:00
}
2016-10-14 04:49:02 -05:00
"atomic_load" |
"atomic_load_relaxed" |
2016-11-15 08:19:38 -06:00
"atomic_load_acq" |
2016-10-14 04:49:02 -05:00
"volatile_load" => {
let ptr = self.deref_operand(args[0])?;
let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, dest)?;
2016-10-14 04:49:02 -05:00
}
"atomic_store" |
2016-12-15 11:27:47 -06:00
"atomic_store_relaxed" |
"atomic_store_rel" |
2016-10-14 04:49:02 -05:00
"volatile_store" => {
let ptr = self.deref_operand(args[0])?;
let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, ptr.into())?;
2016-10-14 04:49:02 -05:00
}
            "atomic_fence_acq" => {
                // we are inherently singlethreaded and singlecored, this is a nop
            }
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ptr = self.deref_operand(args[0])?;
                let new = self.read_scalar(args[1])?;
                let old = self.read_scalar(ptr.into())?;
                self.write_scalar(old, dest)?; // old value is returned
                self.write_scalar(new, ptr.into())?;
            }
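            // Compare-and-exchange: the destination receives a pair of the old value and a
            // success flag; the memory is only updated when the comparison succeeded.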
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ptr = self.deref_operand(args[0])?;
                let expect_old = self.read_immediate(args[1])?; // read as immediate for the sake of `binary_op_imm()`
                let new = self.read_scalar(args[2])?;
                let old = self.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op_imm()`
                // binary_op_imm will bail if either of them is not a scalar
                let (eq, _) = self.binary_op_imm(mir::BinOp::Eq, old, expect_old)?;
                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
                self.write_immediate(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    self.write_scalar(new, ptr.into())?;
                }
            }
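            // Read-modify-write atomics: return the old value and store the result of the
            // operation; the operation is picked from the second `_`-separated segment of
            // the intrinsic name.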
"atomic_or" |
"atomic_or_acq" |
"atomic_or_rel" |
"atomic_or_acqrel" |
"atomic_or_relaxed" |
"atomic_xor" |
"atomic_xor_acq" |
"atomic_xor_rel" |
"atomic_xor_acqrel" |
"atomic_xor_relaxed" |
"atomic_and" |
"atomic_and_acq" |
"atomic_and_rel" |
"atomic_and_acqrel" |
"atomic_and_relaxed" |
"atomic_xadd" |
"atomic_xadd_acq" |
"atomic_xadd_rel" |
"atomic_xadd_acqrel" |
"atomic_xadd_relaxed" |
"atomic_xsub" |
"atomic_xsub_acq" |
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ptr = self.deref_operand(args[0])?;
if !ptr.layout.ty.is_integral() {
return err!(Unimplemented(format!("Atomic arithmetic operations only work on integer types")));
}
2018-11-05 01:51:55 -06:00
let rhs = self.read_immediate(args[1])?;
let old = self.read_immediate(ptr.into())?;
self.write_immediate(*old, dest)?; // old value is returned
2017-03-14 07:05:51 -05:00
let op = match intrinsic_name.split('_').nth(1).unwrap() {
"or" => mir::BinOp::BitOr,
"xor" => mir::BinOp::BitXor,
"and" => mir::BinOp::BitAnd,
"xadd" => mir::BinOp::Add,
"xsub" => mir::BinOp::Sub,
_ => bug!(),
2016-11-03 11:32:06 -05:00
};
2018-10-12 02:07:56 -05:00
// Atomics wrap around on overflow.
self.binop_ignore_overflow(op, old, rhs, ptr.into())?;
}
            "breakpoint" => unimplemented!(), // halt miri
"copy" |
"copy_nonoverlapping" => {
2017-03-29 02:10:05 -05:00
let elem_ty = substs.type_at(0);
2017-12-06 08:03:24 -06:00
let elem_layout = self.layout_of(elem_ty)?;
let elem_size = elem_layout.size.bytes();
2018-11-05 01:51:55 -06:00
let count = self.read_scalar(args[2])?.to_usize(self)?;
2018-11-23 02:46:51 -06:00
let elem_align = elem_layout.align.abi;
// erase tags: this is a raw ptr operation
2018-11-17 03:11:21 -06:00
let src = self.read_scalar(args[0])?.not_undef()?;
let dest = self.read_scalar(args[1])?.not_undef()?;
2018-10-19 12:51:41 -05:00
self.memory_mut().copy(
2018-11-17 03:11:21 -06:00
src,
elem_align,
2018-11-17 03:11:21 -06:00
dest,
elem_align,
Size::from_bytes(count * elem_size),
intrinsic_name.ends_with("_nonoverlapping"),
)?;
2016-09-20 09:05:30 -05:00
}
"discriminant_value" => {
let place = self.deref_operand(args[0])?;
2018-08-25 04:07:21 -05:00
let discr_val = self.read_discriminant(place.into())?.0;
self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
2016-09-20 09:05:30 -05:00
}
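            // Unary float intrinsics (and their f64 counterparts below) are evaluated with
            // the host's floating point implementation.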
"sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
"log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
2018-08-26 13:42:26 -05:00
let f = self.read_scalar(args[0])?.to_f32()?;
2017-03-14 06:35:38 -05:00
let f = match intrinsic_name {
"sinf32" => f.sin(),
"fabsf32" => f.abs(),
"cosf32" => f.cos(),
"sqrtf32" => f.sqrt(),
"expf32" => f.exp(),
"exp2f32" => f.exp2(),
"logf32" => f.ln(),
"log10f32" => f.log10(),
"log2f32" => f.log2(),
"floorf32" => f.floor(),
"ceilf32" => f.ceil(),
"truncf32" => f.trunc(),
_ => bug!(),
};
self.write_scalar(Scalar::from_f32(f), dest)?;
2016-09-20 09:05:30 -05:00
}
"sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
"log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
2018-08-26 13:42:26 -05:00
let f = self.read_scalar(args[0])?.to_f64()?;
2017-03-14 06:35:38 -05:00
let f = match intrinsic_name {
"sinf64" => f.sin(),
"fabsf64" => f.abs(),
"cosf64" => f.cos(),
"sqrtf64" => f.sqrt(),
"expf64" => f.exp(),
"exp2f64" => f.exp2(),
"logf64" => f.ln(),
"log10f64" => f.log10(),
"log2f64" => f.log2(),
"floorf64" => f.floor(),
"ceilf64" => f.ceil(),
"truncf64" => f.trunc(),
_ => bug!(),
};
self.write_scalar(Scalar::from_f64(f), dest)?;
2016-09-20 09:05:30 -05:00
}
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
2018-11-05 01:51:55 -06:00
let a = self.read_immediate(args[0])?;
let b = self.read_immediate(args[1])?;
let op = match intrinsic_name {
"fadd_fast" => mir::BinOp::Add,
"fsub_fast" => mir::BinOp::Sub,
"fmul_fast" => mir::BinOp::Mul,
"fdiv_fast" => mir::BinOp::Div,
"frem_fast" => mir::BinOp::Rem,
_ => bug!(),
};
self.binop_ignore_overflow(op, a, b, dest)?;
2016-09-20 09:05:30 -05:00
}
            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior where
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
                let a = self.read_immediate(args[0])?;
                let b = self.read_immediate(args[1])?;
                // check x % y != 0
                if self.binary_op_imm(mir::BinOp::Rem, a, b)?.0.to_bytes()? != 0 {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                self.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
            },
            "likely" | "unlikely" | "forget" => {}
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(self));
                            self.write_immediate(Immediate::Scalar(x.into()), dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(self));
                            let y = Scalar::from_int(0, s2.value.size(self));
                            self.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            // not a zst, must be valid pointer
                            let ptr = mplace.ptr.to_ptr()?;
                            self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
                        }
                    }
                }
            }
"pref_align_of" => {
2017-03-29 02:10:05 -05:00
let ty = substs.type_at(0);
2017-12-06 08:03:24 -06:00
let layout = self.layout_of(ty)?;
2018-11-23 02:46:51 -06:00
let align = layout.align.pref.bytes();
2018-08-26 13:42:26 -05:00
let ptr_size = self.pointer_size();
2018-08-07 08:22:11 -05:00
let align_val = Scalar::from_uint(align as u128, ptr_size);
self.write_scalar(align_val, dest)?;
2016-09-20 09:05:30 -05:00
}
"move_val_init" => {
let ptr = self.deref_operand(args[0])?;
self.copy_op(args[1], ptr.into())?;
2016-09-20 09:05:30 -05:00
}
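            // In contrast to `arith_offset` above, `offset` requires the pointer arithmetic
            // to stay in bounds; `pointer_offset_inbounds` performs that check.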
"offset" => {
2018-11-05 01:51:55 -06:00
let offset = self.read_scalar(args[1])?.to_isize(self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
self.write_scalar(result_ptr, dest)?;
2016-09-20 09:05:30 -05:00
}
            "powf32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let f2 = self.read_scalar(args[1])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powf(f2)),
                    dest,
                )?;
            }
            "powf64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let f2 = self.read_scalar(args[1])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(f.powf(f2)),
                    dest,
                )?;
            }
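            // Note that `a * b + c` rounds twice, whereas a true fused multiply-add rounds
            // only once, so this is only an approximation of the fma intrinsics.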
"fmaf32" => {
2018-08-26 13:42:26 -05:00
let a = self.read_scalar(args[0])?.to_f32()?;
let b = self.read_scalar(args[1])?.to_f32()?;
let c = self.read_scalar(args[2])?.to_f32()?;
2018-05-26 10:07:34 -05:00
self.write_scalar(
Scalar::from_f32(a * b + c),
dest,
)?;
2017-03-14 06:35:38 -05:00
}
"fmaf64" => {
2018-08-26 13:42:26 -05:00
let a = self.read_scalar(args[0])?.to_f64()?;
let b = self.read_scalar(args[1])?.to_f64()?;
let c = self.read_scalar(args[2])?.to_f64()?;
2018-05-26 10:07:34 -05:00
self.write_scalar(
Scalar::from_f64(a * b + c),
dest,
)?;
2016-09-20 09:05:30 -05:00
}
            "powif32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powi(i)),
                    dest,
                )?;
            }
            "powif64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f64(f.powi(i)),
                    dest,
                )?;
            }
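            // For possibly unsized values, size and alignment are computed from the place's
            // metadata (e.g. the length of a slice or the vtable of a trait object).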
"size_of_val" => {
let mplace = self.deref_operand(args[0])?;
let (size, _) = self.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
2018-08-26 13:42:26 -05:00
let ptr_size = self.pointer_size();
2018-05-26 10:07:34 -05:00
self.write_scalar(
2018-08-07 08:22:11 -05:00
Scalar::from_uint(size.bytes() as u128, ptr_size),
dest,
)?;
2016-09-20 09:05:30 -05:00
}
2016-11-03 11:32:06 -05:00
"min_align_of_val" |
"align_of_val" => {
let mplace = self.deref_operand(args[0])?;
let (_, align) = self.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
2018-08-26 13:42:26 -05:00
let ptr_size = self.pointer_size();
2018-05-26 10:07:34 -05:00
self.write_scalar(
2018-11-23 02:46:51 -06:00
Scalar::from_uint(align.bytes(), ptr_size),
dest,
)?;
2016-11-03 11:32:06 -05:00
}
            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_immediate(&ty_name)?;
                self.write_immediate(value, dest)?;
            }
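            // Division or remainder by zero is UB for the unchecked intrinsics, so detect it
            // and report it as an interpreter error instead.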
"unchecked_div" => {
2018-11-05 01:51:55 -06:00
let l = self.read_immediate(args[0])?;
let r = self.read_immediate(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
if rval == 0 {
2017-08-02 09:59:01 -05:00
return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
}
self.binop_ignore_overflow(
mir::BinOp::Div,
l,
r,
dest,
)?;
}
"unchecked_rem" => {
2018-11-05 01:51:55 -06:00
let l = self.read_immediate(args[0])?;
let r = self.read_immediate(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
if rval == 0 {
2017-08-02 09:59:01 -05:00
return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
}
self.binop_ignore_overflow(
mir::BinOp::Rem,
l,
r,
dest,
)?;
}
"uninit" => {
// Check fast path: we don't want to force an allocation in case the destination is a simple value,
// but we also do not want to create a new allocation with 0s and then copy that over.
// FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
// FIXME: should we check alignment for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(..) => {
let x = ScalarMaybeUndef::Undef;
2018-11-05 01:51:55 -06:00
self.write_immediate(Immediate::Scalar(x), dest)?;
}
layout::Abi::ScalarPair(..) => {
let x = ScalarMaybeUndef::Undef;
2018-11-05 01:51:55 -06:00
self.write_immediate(Immediate::ScalarPair(x, x), dest)?;
}
_ => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
2018-10-05 11:08:50 -05:00
assert!(mplace.meta.is_none());
let ptr = mplace.ptr.to_ptr()?;
self.memory_mut()
.get_mut(ptr.alloc_id)?
.mark_definedness(ptr, dest.layout.size, false)?;
}
2018-04-07 04:43:46 -05:00
}
}
}
2016-09-20 09:05:30 -05:00
2017-03-13 18:28:29 -05:00
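            // `write_bytes` fills `count * size_of::<T>()` bytes starting at `ptr` with
            // `val_byte`, after checking that `ptr` is sufficiently aligned for `T`.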
"write_bytes" => {
2017-03-29 02:10:05 -05:00
let ty = substs.type_at(0);
2017-12-06 08:03:24 -06:00
let ty_layout = self.layout_of(ty)?;
let val_byte = self.read_scalar(args[1])?.to_u8()?;
2018-11-17 03:11:21 -06:00
let ptr = self.read_scalar(args[0])?.not_undef()?;
2018-11-05 01:51:55 -06:00
let count = self.read_scalar(args[2])?.to_usize(self)?;
2018-11-23 02:46:51 -06:00
self.memory().check_align(ptr, ty_layout.align.abi)?;
let byte_count = ty_layout.size * count;
if byte_count.bytes() != 0 {
let ptr = ptr.to_ptr()?;
self.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, val_byte, byte_count)?;
}
2017-03-13 18:28:29 -05:00
}
2017-08-02 09:59:01 -05:00
name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
2016-09-20 09:05:30 -05:00
}
Ok(())
}
}