use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc::ty::layout::Layout;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};

use error::{EvalError, EvalResult};
use interpreter::value::Value;
use interpreter::{EvalContext, Lvalue, LvalueExtra};
use primval::{self, PrimVal, PrimValKind};

impl<'a, 'tcx> EvalContext<'a, 'tcx> {
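    /// Evaluates a call to a compiler intrinsic. The result is written to `dest`
    /// and, on success, execution continues at `target`. No new stack frame is
    /// pushed, except for the drop glue scheduled by `drop_in_place`.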
    pub(super) fn call_intrinsic(
        &mut self,
        def_id: DefId,
        substs: &'tcx Substs<'tcx>,
        args: &[mir::Operand<'tcx>],
        dest: Lvalue<'tcx>,
        dest_ty: Ty<'tcx>,
        dest_layout: &'tcx Layout,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx, ()> {
        // Evaluate all argument operands up front.
        let arg_vals: EvalResult<Vec<Value>> = args.iter()
            .map(|arg| self.eval_operand(arg))
            .collect();
        let arg_vals = arg_vals?;

        // Shorthands for primitive types used by several intrinsics below.
        let i32 = self.tcx.types.i32;
        let isize = self.tcx.types.isize;
        let usize = self.tcx.types.usize;
        let f32 = self.tcx.types.f32;
        let f64 = self.tcx.types.f64;

        let intrinsic_name = &self.tcx.item_name(def_id).as_str()[..];
        match intrinsic_name {
            "add_with_overflow" =>
                self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?,

            "sub_with_overflow" =>
                self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?,

            "mul_with_overflow" =>
                self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?,

            "arith_offset" => {
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                let offset = self.value_to_primval(arg_vals[1], isize)?
                    .expect_int("arith_offset second arg not isize");
                let new_ptr = ptr.offset(offset as isize);
                self.write_primval(dest, PrimVal::from_ptr(new_ptr))?;
            }

            "assume" => {
                let bool = self.tcx.types.bool;
                let cond = self.value_to_primval(arg_vals[0], bool)?.try_as_bool()?;
                if !cond { return Err(EvalError::AssumptionNotHeld); }
            }

            "atomic_load" |
            "atomic_load_acq" |
            "volatile_load" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                self.write_value(Value::ByRef(ptr), dest, ty)?;
            }

            "atomic_store" |
            "volatile_store" => {
                let ty = substs.type_at(0);
                let dest = arg_vals[0].read_ptr(&self.memory)?;
                self.write_value_to_ptr(arg_vals[1], dest, ty)?;
            }

            "atomic_fence_acq" => {
                // Miri is inherently single-threaded and single-core, so a fence is a no-op.
            }

            "atomic_xchg" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                let change = self.value_to_primval(arg_vals[1], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
                    Value::ByRef(_) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
                };
                self.write_primval(dest, old)?;
                self.write_primval(Lvalue::from_ptr(ptr), change)?;
            }

            "atomic_cxchg" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                let expect_old = self.value_to_primval(arg_vals[1], ty)?;
                let change = self.value_to_primval(arg_vals[2], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
                    Value::ByRef(_) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
                };
                // The result is a pair of the old value and whether it matched `expect_old`.
                let (val, _) = primval::binary_op(mir::BinOp::Eq, old, expect_old)?;
                let dest = self.force_allocation(dest)?.to_ptr();
                self.write_pair_to_ptr(old, val, dest, dest_ty)?;
                self.write_primval(Lvalue::from_ptr(ptr), change)?;
            }

            "atomic_xadd_relaxed" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                let change = self.value_to_primval(arg_vals[1], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
                    Value::ByRef(_) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"),
                };
                self.write_primval(dest, old)?;
                // FIXME: what do atomics do on overflow?
                let (val, _) = primval::binary_op(mir::BinOp::Add, old, change)?;
                self.write_primval(Lvalue::from_ptr(ptr), val)?;
            },

            "atomic_xsub_rel" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                let change = self.value_to_primval(arg_vals[1], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
                    Value::ByRef(_) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_xsub_rel doesn't work with nonprimitives"),
                };
                self.write_primval(dest, old)?;
                // FIXME: what do atomics do on overflow?
                let (val, _) = primval::binary_op(mir::BinOp::Sub, old, change)?;
                self.write_primval(Lvalue::from_ptr(ptr), val)?;
            }

            "breakpoint" => unimplemented!(), // halt miri

            "copy" |
            "copy_nonoverlapping" => {
                // FIXME: check whether overlapping occurs
                let elem_ty = substs.type_at(0);
                let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
                let elem_align = self.type_align(elem_ty)?;
                let src = arg_vals[0].read_ptr(&self.memory)?;
                let dest = arg_vals[1].read_ptr(&self.memory)?;
                let count = self.value_to_primval(arg_vals[2], usize)?
                    .expect_uint("copy count arg not usize");
                self.memory.copy(src, dest, count as usize * elem_size, elem_align)?;
            }

            "ctpop" |
            "cttz" |
            "ctlz" |
            "bswap" => {
                let elem_ty = substs.type_at(0);
                let num = self.value_to_primval(arg_vals[0], elem_ty)?;
                let num = numeric_intrinsic(intrinsic_name, num);
                self.write_primval(dest, num)?;
            }

            "discriminant_value" => {
                let ty = substs.type_at(0);
                let adt_ptr = arg_vals[0].read_ptr(&self.memory)?;
                let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
                self.write_primval(dest, PrimVal::new(discr_val, PrimValKind::U64))?;
            }

            "drop_in_place" => {
                let ty = substs.type_at(0);
                trace!("drop in place on {}", ty);
                let ptr_ty = self.tcx.mk_mut_ptr(ty);
                let lvalue = match self.follow_by_ref_value(arg_vals[0], ptr_ty)? {
                    Value::ByRef(_) => bug!("follow_by_ref_value returned ByRef"),
                    Value::ByVal(value) => Lvalue::from_ptr(value.to_ptr()),
                    Value::ByValPair(ptr, extra) => Lvalue::Ptr {
                        ptr: ptr.to_ptr(),
                        extra: match self.tcx.struct_tail(ty).sty {
                            ty::TyTrait(_) => LvalueExtra::Vtable(extra.to_ptr()),
                            ty::TyStr | ty::TySlice(_) => LvalueExtra::Length(extra.try_as_uint()?),
                            _ => bug!("invalid fat pointer type: {}", ptr_ty),
                        },
                    },
                };
                let mut drops = Vec::new();
                self.drop(lvalue, ty, &mut drops)?;
                // We need to change the block before pushing the drop impl stack frames.
                // We could do this for all intrinsics before evaluating them, but if
                // the evaluation fails, we should not have moved forward.
                self.goto_block(target);
                return self.eval_drop_impls(drops);
            }

            "fabsf32" => {
                let f = self.value_to_primval(arg_vals[0], f32)?
                    .expect_f32("fabsf32 read non f32");
                self.write_primval(dest, PrimVal::from_f32(f.abs()))?;
            }

            "fabsf64" => {
                let f = self.value_to_primval(arg_vals[0], f64)?
                    .expect_f64("fabsf64 read non f64");
                self.write_primval(dest, PrimVal::from_f64(f.abs()))?;
            }

            "fadd_fast" => {
                let ty = substs.type_at(0);
                let a = self.value_to_primval(arg_vals[0], ty)?;
                let b = self.value_to_primval(arg_vals[1], ty)?;
                let result = primval::binary_op(mir::BinOp::Add, a, b)?;
                self.write_primval(dest, result.0)?;
            }

            "likely" |
            "unlikely" |
            "forget" => {}

            "init" => {
                let size = dest_layout.size(&self.tcx.data_layout).bytes() as usize;
                // Zero-initialize the destination, whatever form it currently has.
                let init = |this: &mut Self, val: Option<Value>| {
                    match val {
                        Some(Value::ByRef(ptr)) => {
                            this.memory.write_repeat(ptr, 0, size)?;
                            Ok(Some(Value::ByRef(ptr)))
                        },
                        None => match this.ty_to_primval_kind(dest_ty) {
                            Ok(kind) => Ok(Some(Value::ByVal(PrimVal::new(0, kind)))),
                            Err(_) => {
                                let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?;
                                this.memory.write_repeat(ptr, 0, size)?;
                                Ok(Some(Value::ByRef(ptr)))
                            }
                        },
                        Some(Value::ByVal(value)) => Ok(Some(Value::ByVal(PrimVal::new(0, value.kind)))),
                        Some(Value::ByValPair(a, b)) => Ok(Some(Value::ByValPair(
                            PrimVal::new(0, a.kind),
                            PrimVal::new(0, b.kind),
                        ))),
                    }
                };
                match dest {
                    Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
                    Lvalue::Ptr { ptr, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?,
                    Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat ptr target"),
                    Lvalue::Global(cid) => self.modify_global(cid, init)?,
                }
            }

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.type_align(elem_ty)?;
                let align_val = self.usize_primval(elem_align as u64);
                self.write_primval(dest, align_val)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.type_layout(ty)?;
                let align = layout.align(&self.tcx.data_layout).pref();
                let align_val = self.usize_primval(align);
                self.write_primval(dest, align_val)?;
            }

            "move_val_init" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = self.tcx.empty_parameter_environment();
                let needs_drop = self.tcx.type_needs_drop_given_env(ty, &env);
                self.write_primval(dest, PrimVal::from_bool(needs_drop))?;
            }

            "offset" => {
                let pointee_ty = substs.type_at(0);
                let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as isize;
                let offset = self.value_to_primval(arg_vals[1], isize)?
                    .expect_int("offset second arg not isize");

                let ptr = arg_vals[0].read_ptr(&self.memory)?;
                let result_ptr = ptr.offset(offset as isize * pointee_size);
                self.write_primval(dest, PrimVal::from_ptr(result_ptr))?;
            }

            "overflowing_sub" => {
                self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest)?;
            }

            "overflowing_mul" => {
                self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest)?;
            }

            "overflowing_add" => {
                self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest)?;
            }

            "powif32" => {
                let f = self.value_to_primval(arg_vals[0], f32)?
                    .expect_f32("powif32 first arg not f32");
                let i = self.value_to_primval(arg_vals[1], i32)?
                    .expect_int("powif32 second arg not i32");
                self.write_primval(dest, PrimVal::from_f32(f.powi(i as i32)))?;
            }

            "powif64" => {
                let f = self.value_to_primval(arg_vals[0], f64)?
                    .expect_f64("powif64 first arg not f64");
                let i = self.value_to_primval(arg_vals[1], i32)?
                    .expect_int("powif64 second arg not i32");
                self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)))?;
            }

            "sqrtf32" => {
                let f = self.value_to_primval(arg_vals[0], f32)?
                    .expect_f32("sqrtf32 first arg not f32");
                self.write_primval(dest, PrimVal::from_f32(f.sqrt()))?;
            }

            "sqrtf64" => {
                let f = self.value_to_primval(arg_vals[0], f64)?
                    .expect_f64("sqrtf64 first arg not f64");
                self.write_primval(dest, PrimVal::from_f64(f.sqrt()))?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                // FIXME: change the `box_free` lang item to take `T: ?Sized` and have it use the
                // `size_of_val` intrinsic, then change this back to
                // .expect("size_of intrinsic called on unsized value")
                // see https://github.com/rust-lang/rust/pull/37708
                let size = self.type_size(ty)?.unwrap_or(!0) as u64;
                let size_val = self.usize_primval(size);
                self.write_primval(dest, size_val)?;
            }

            "size_of_val" => {
                let ty = substs.type_at(0);
                let (size, _) = self.size_and_align_of_dst(ty, arg_vals[0])?;
                let size_val = self.usize_primval(size);
                self.write_primval(dest, size_val)?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let ty = substs.type_at(0);
                let (_, align) = self.size_and_align_of_dst(ty, arg_vals[0])?;
                let align_val = self.usize_primval(align);
                self.write_primval(dest, align_val)?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let s = self.str_to_value(&ty_name)?;
                self.write_value(s, dest, dest_ty)?;
            }

            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                self.write_primval(dest, PrimVal::new(n, PrimValKind::U64))?;
            }

            "transmute" => {
                let dest_ty = substs.type_at(1);
                let val = match arg_vals[0] {
                    Value::ByVal(primval) =>
                        Value::ByVal(self.transmute_primval(primval, dest_ty)?),
                    v => v,
                };
                self.write_value(val, dest, dest_ty)?;
            }

            "uninit" => {
                let size = dest_layout.size(&self.tcx.data_layout).bytes() as usize;
                // Mark the destination as undefined, whatever form it currently has.
                let uninit = |this: &mut Self, val: Option<Value>| {
                    match val {
                        Some(Value::ByRef(ptr)) => {
                            this.memory.mark_definedness(ptr, size, false)?;
                            Ok(Some(Value::ByRef(ptr)))
                        },
                        None => Ok(None),
                        Some(_) => Ok(None),
                    }
                };
                match dest {
                    Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
                    Lvalue::Ptr { ptr, extra: LvalueExtra::None } => self.memory.mark_definedness(ptr, size, false)?,
                    Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat ptr target"),
                    Lvalue::Global(cid) => self.modify_global(cid, uninit)?,
                }
            }

            name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        self.goto_block(target);

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
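
    /// Computes the size and alignment of a possibly unsized value at runtime.
    /// Statically sized types are answered from their layout; for DSTs the sized
    /// prefix is combined with the tail, whose size comes from the vtable (trait
    /// objects) or the length (slices and `str`). As an illustrative example,
    /// assuming the usual layout of `struct Foo { a: u32, b: [u16] }` with a tail
    /// of length 3: the sized prefix is 4 bytes, the tail is 6 bytes, and the
    /// 10-byte sum is rounded up to 12 to satisfy the 4-byte alignment.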
    fn size_and_align_of_dst(
        &self,
        ty: ty::Ty<'tcx>,
        value: Value,
    ) -> EvalResult<'tcx, (u64, u64)> {
        let pointer_size = self.memory.pointer_size();
        if let Some(size) = self.type_size(ty)? {
            Ok((size as u64, self.type_align(ty)? as u64))
        } else {
            match ty.sty {
                ty::TyAdt(def, substs) => {
                    // First get the size of all statically known fields.
                    // Don't use type_of::sizing_type_of because that expects t to be sized,
                    // and it also rounds up to alignment, which we want to avoid,
                    // as the unsized field's alignment could be smaller.
                    assert!(!ty.is_simd());
                    let layout = self.type_layout(ty)?;
                    debug!("DST {} layout: {:?}", ty, layout);

                    let (sized_size, sized_align) = match *layout {
                        ty::layout::Layout::Univariant { ref variant, .. } => {
                            // The offset of the start of the last field gives the size of the
                            // sized part of the type.
                            let size = variant.offsets.last().map_or(0, |f| f.bytes());
                            (size, variant.align.abi())
                        }
                        _ => {
                            bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}",
                                 ty, layout);
                        }
                    };
                    debug!("DST {} statically sized prefix size: {} align: {}",
                           ty, sized_size, sized_align);

                    // Recurse to get the size of the dynamically sized field (must be
                    // the last field).
                    let last_field = def.struct_variant().fields.last().unwrap();
                    let field_ty = self.field_ty(substs, last_field);
                    let (unsized_size, unsized_align) = self.size_and_align_of_dst(field_ty, value)?;

                    // FIXME (#26403, #27023): We should be adding padding
                    // to `sized_size` (to accommodate the `unsized_align`
                    // required of the unsized field that follows) before
                    // summing it with `unsized_size`. (Note that since #26403
                    // is unfixed, we do not yet add the necessary padding
                    // here. But this is where the add would go.)

                    // Return the sum of sizes and max of aligns.
                    let size = sized_size + unsized_size;

                    // Choose max of two known alignments (combined value must
                    // be aligned according to more restrictive of the two).
                    let align = ::std::cmp::max(sized_align, unsized_align);

                    // Issue #27023: must add any necessary padding to `size`
                    // (to make it a multiple of `align`) before returning it.
                    //
                    // Namely, the returned size should be, in C notation:
                    //
                    //   `size + ((size & (align-1)) ? align : 0)`
                    //
                    // emulated via the semi-standard fast bit trick:
                    //
                    //   `(size + (align-1)) & -align`
                    if size & (align - 1) != 0 {
                        Ok(((size + align - 1) & !(align - 1), align))
                    } else {
                        Ok((size, align))
                    }
                }
                ty::TyTrait(..) => {
                    let (_, vtable) = value.expect_ptr_vtable_pair(&self.memory)?;
                    // The second and third entries in the vtable are the dynamic size and
                    // alignment of the object.
                    let size = self.memory.read_usize(vtable.offset(pointer_size as isize))?;
                    let align = self.memory.read_usize(vtable.offset(pointer_size as isize * 2))?;
                    Ok((size, align))
                }

                ty::TySlice(_) | ty::TyStr => {
                    let elem_ty = ty.sequence_element_type(self.tcx);
                    let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64;
                    let (_, len) = value.expect_slice(&self.memory)?;
                    let align = self.type_align(elem_ty)?;
                    Ok((len * elem_size, align as u64))
                }

                _ => bug!("size_of_val::<{:?}>", ty),
            }
        }
    }

    /// Returns the normalized type of a struct field.
    fn field_ty(
        &self,
        param_substs: &Substs<'tcx>,
        f: ty::FieldDef<'tcx>,
    ) -> ty::Ty<'tcx> {
        self.tcx.normalize_associated_type(&f.ty(self.tcx, param_substs))
    }
}

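// Expands to a match on the integer kind of `$val`, applying the inherent integer
// method `$method` (e.g. `swap_bytes`, `count_ones`) at the value's actual width
// and repackaging the result as a `PrimVal` of the same kind.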
macro_rules! integer_intrinsic {
    ($name:expr, $val:expr, $method:ident) => ({
        let val = $val;

        use primval::PrimValKind::*;
        let bits = match val.kind {
            I8 => (val.bits as i8).$method() as u64,
            U8 => (val.bits as u8).$method() as u64,
            I16 => (val.bits as i16).$method() as u64,
            U16 => (val.bits as u16).$method() as u64,
            I32 => (val.bits as i32).$method() as u64,
            U32 => (val.bits as u32).$method() as u64,
            I64 => (val.bits as i64).$method() as u64,
            U64 => (val.bits as u64).$method() as u64,
            _ => bug!("invalid `{}` argument: {:?}", $name, val),
        };

        PrimVal::new(bits, val.kind)
    });
}

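/// Applies one of the bit-manipulation intrinsics (`bswap`, `ctlz`, `ctpop`,
/// `cttz`) to a primitive integer value via `integer_intrinsic!`, preserving its
/// kind. For example, `ctpop` on a `U8` whose bits are `0b1011` yields a `U8`
/// whose bits are `3`.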
fn numeric_intrinsic(name: &str, val: PrimVal) -> PrimVal {
    match name {
        "bswap" => integer_intrinsic!("bswap", val, swap_bytes),
        "ctlz" => integer_intrinsic!("ctlz", val, leading_zeros),
        "ctpop" => integer_intrinsic!("ctpop", val, count_ones),
        "cttz" => integer_intrinsic!("cttz", val, trailing_zeros),
        _ => bug!("not a numeric intrinsic: {}", name),
    }
}