rust/src/interpreter/mod.rs

use rustc::middle::const_val;
use rustc::hir::def_id::DefId;
use rustc::mir::mir_map::MirMap;
use rustc::mir::repr as mir;
use rustc::traits::{self, ProjectionMode};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::layout::{self, Layout, Size};
use rustc::ty::subst::{self, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt, BareFnTy};
use rustc::util::nodemap::DefIdMap;
use rustc_data_structures::indexed_vec::Idx;
use std::cell::RefCell;
use std::ops::Deref;
use std::rc::Rc;
use std::{iter, mem};
use syntax::ast;
use syntax::attr;
use syntax::codemap::{self, DUMMY_SP, Span};
use error::{EvalError, EvalResult};
use memory::{Memory, Pointer, FunctionDefinition};
use primval::{self, PrimVal};
use std::collections::HashMap;
mod stepper;
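// Single-stepping entry point: `stepper::Stepper` executes one MIR statement or terminator
// per call. A hypothetical driver loop (the setup around it is assumed, and the returned
// `bool` is taken to mean "there is more work to do"; see `stepper.rs` for the authoritative
// semantics):
//
//     let mut ecx = EvalContext::new(tcx, mir_map);
//     // ...push an initial stack frame for the start function...
//     while step(&mut ecx)? {}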
pub fn step<'ecx, 'a: 'ecx, 'tcx: 'a>(ecx: &'ecx mut EvalContext<'a, 'tcx>) -> EvalResult<'tcx, bool> {
stepper::Stepper::new(ecx).step()
}
pub struct EvalContext<'a, 'tcx: 'a> {
/// The results of the type checker, from rustc.
tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// A mapping from NodeIds to Mir, from rustc. Only contains MIR for crate-local items.
mir_map: &'a MirMap<'tcx>,
/// A local cache from DefIds to Mir for non-crate-local items.
mir_cache: RefCell<DefIdMap<Rc<mir::Mir<'tcx>>>>,
/// The virtual memory system.
memory: Memory<'tcx>,
/// Precomputed statics, constants and promoteds.
statics: HashMap<ConstantId<'tcx>, Pointer>,
/// The virtual call stack.
stack: Vec<Frame<'a, 'tcx>>,
}
/// A stack frame.
pub struct Frame<'a, 'tcx: 'a> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
/// The MIR for the function called on this frame.
pub mir: CachedMir<'a, 'tcx>,
/// The def_id of the current function.
pub def_id: DefId,
/// type substitutions for the current function invocation.
pub substs: &'tcx Substs<'tcx>,
/// The span of the call site.
pub span: codemap::Span,
////////////////////////////////////////////////////////////////////////////////
// Return pointer and local allocations
////////////////////////////////////////////////////////////////////////////////
/// A pointer for writing the return value of the current call if it's not a diverging call.
pub return_ptr: Option<Pointer>,
/// The list of locals for the current function, stored in order as
/// `[arguments..., variables..., temporaries...]`. The variables begin at `self.var_offset`
/// and the temporaries at `self.temp_offset`.
pub locals: Vec<Pointer>,
/// The offset of the first variable in `self.locals`.
pub var_offset: usize,
/// The offset of the first temporary in `self.locals`.
pub temp_offset: usize,
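// How `eval_lvalue` (below) indexes into `locals`, as a quick sketch of the layout described
// above (taken from the `Arg`/`Var`/`Temp` arms of that function):
//
//     Arg(i)  => locals[i.index()]
//     Var(i)  => locals[var_offset  + i.index()]
//     Temp(i) => locals[temp_offset + i.index()]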
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
/// The block that is currently being executed (or will be executed after the frames above
/// this one return).
pub block: mir::BasicBlock,
/// The index of the currently evaluated statement.
pub stmt: usize,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct Lvalue {
ptr: Pointer,
extra: LvalueExtra,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum LvalueExtra {
None,
Length(u64),
// TODO(solson): Vtable(memory::AllocId),
DowncastVariant(usize),
}
#[derive(Clone)]
pub enum CachedMir<'mir, 'tcx: 'mir> {
Ref(&'mir mir::Mir<'tcx>),
Owned(Rc<mir::Mir<'tcx>>)
}
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
/// Uniquely identifies a specific constant or static
struct ConstantId<'tcx> {
/// The def id of the constant/static, or, in the case of promoteds, the def id of the function they belong to.
def_id: DefId,
/// In the case of statics and constants this is `Substs::empty()`, so only promoteds and
/// associated constants actually have something useful here. We could special-case statics
/// and constants, but that would only add more branching when working with constants without
/// bringing any real benefit.
substs: &'tcx Substs<'tcx>,
kind: ConstantKind,
}
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
enum ConstantKind {
Promoted(mir::Promoted),
/// Statics, constants and associated constants
Global,
}
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir_map: &'a MirMap<'tcx>) -> Self {
EvalContext {
tcx: tcx,
mir_map: mir_map,
mir_cache: RefCell::new(DefIdMap()),
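// Pointer size in bytes: the bit width of the target's pointer-sized unsigned integer,
// divided by 8 (e.g. 8 bytes on a 64-bit target).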
memory: Memory::new(tcx.sess
.target
.uint_type
.bit_width()
.expect("Session::target::uint_type was usize")/8),
statics: HashMap::new(),
stack: Vec::new(),
}
}
pub fn alloc_ret_ptr(&mut self, output_ty: ty::FnOutput<'tcx>, substs: &'tcx Substs<'tcx>) -> Option<Pointer> {
match output_ty {
ty::FnConverging(ty) => {
let size = self.type_size_with_substs(ty, substs);
Some(self.memory.allocate(size))
}
ty::FnDiverging => None,
}
}
pub fn memory(&self) -> &Memory {
&self.memory
}
pub fn memory_mut(&mut self) -> &mut Memory<'tcx> {
&mut self.memory
}
pub fn stack(&self) -> &[Frame] {
&self.stack
}
// TODO(solson): Try making const_to_primval instead.
fn const_to_ptr(&mut self, const_val: &const_val::ConstVal) -> EvalResult<'tcx, Pointer> {
use rustc::middle::const_val::ConstVal::*;
match *const_val {
Float(_f) => unimplemented!(),
Integral(int) => {
// TODO(solson): Check int constant type.
let ptr = self.memory.allocate(8);
self.memory.write_uint(ptr, int.to_u64_unchecked(), 8)?;
Ok(ptr)
}
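// `&str` constants become a fat pointer: the string bytes go into one allocation, and the
// returned allocation holds the (data pointer, length) pair, each `pointer_size` wide.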
Str(ref s) => {
let psize = self.memory.pointer_size;
let static_ptr = self.memory.allocate(s.len());
let ptr = self.memory.allocate(psize * 2);
self.memory.write_bytes(static_ptr, s.as_bytes())?;
self.memory.write_ptr(ptr, static_ptr)?;
self.memory.write_usize(ptr.offset(psize as isize), s.len() as u64)?;
Ok(ptr)
}
ByteStr(ref bs) => {
let psize = self.memory.pointer_size;
let static_ptr = self.memory.allocate(bs.len());
let ptr = self.memory.allocate(psize);
self.memory.write_bytes(static_ptr, bs)?;
self.memory.write_ptr(ptr, static_ptr)?;
Ok(ptr)
}
Bool(b) => {
let ptr = self.memory.allocate(1);
self.memory.write_bool(ptr, b)?;
Ok(ptr)
}
Char(_c) => unimplemented!(),
Struct(_node_id) => unimplemented!(),
Tuple(_node_id) => unimplemented!(),
Function(_def_id) => unimplemented!(),
Array(_, _) => unimplemented!(),
Repeat(_, _) => unimplemented!(),
Dummy => unimplemented!(),
}
}
fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment())
}
fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
ty.is_sized(self.tcx, &self.tcx.empty_parameter_environment(), DUMMY_SP)
}
fn fulfill_obligation(&self, trait_ref: ty::PolyTraitRef<'tcx>) -> traits::Vtable<'tcx, ()> {
// Do the initial selection for the obligation. This yields the shallow result we are
// looking for -- that is, what specific impl.
self.tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
let mut selcx = traits::SelectionContext::new(&infcx);
let obligation = traits::Obligation::new(
traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID),
trait_ref.to_poly_trait_predicate(),
);
let selection = selcx.select(&obligation).unwrap().unwrap();
// Currently, we use a fulfillment context to completely resolve all nested obligations.
// This is because they can inform the inference of the impl's type parameters.
let mut fulfill_cx = traits::FulfillmentContext::new();
let vtable = selection.map(|predicate| {
fulfill_cx.register_predicate_obligation(&infcx, predicate);
});
infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable)
})
}
/// Resolves a trait method to the concrete method (e.g. the matching impl method) that actually has to be called.
pub fn trait_method(
&self,
def_id: DefId,
substs: &'tcx Substs<'tcx>
) -> (DefId, &'tcx Substs<'tcx>) {
let method_item = self.tcx.impl_or_trait_item(def_id);
let trait_id = method_item.container().id();
let trait_ref = ty::Binder(substs.to_trait_ref(self.tcx, trait_id));
match self.fulfill_obligation(trait_ref) {
traits::VtableImpl(vtable_impl) => {
let impl_did = vtable_impl.impl_def_id;
let mname = self.tcx.item_name(def_id);
// Create a concatenated set of substitutions which includes those from the impl
// and those from the method:
let impl_substs = vtable_impl.substs.with_method_from(substs);
let substs = self.tcx.mk_substs(impl_substs);
let mth = get_impl_method(self.tcx, impl_did, substs, mname);
(mth.method.def_id, mth.substs)
}
traits::VtableClosure(vtable_closure) =>
(vtable_closure.closure_def_id, vtable_closure.substs.func_substs),
traits::VtableFnPointer(_fn_ty) => {
let _trait_closure_kind = self.tcx.lang_items.fn_trait_kind(trait_id).unwrap();
unimplemented!()
// let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, fn_ty);
// let method_ty = def_ty(tcx, def_id, substs);
// let fn_ptr_ty = match method_ty.sty {
// ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
// _ => unreachable!("expected fn item type, found {}",
// method_ty)
// };
// Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
}
traits::VtableObject(ref _data) => {
unimplemented!()
// Callee {
// data: Virtual(traits::get_vtable_index_of_object_method(
// tcx, data, def_id)),
// ty: def_ty(tcx, def_id, substs)
// }
}
vtable => unreachable!("trait method resolved to unexpected vtable {:?}", vtable),
}
}
fn load_mir(&self, def_id: DefId) -> CachedMir<'a, 'tcx> {
use rustc_trans::back::symbol_names::def_id_to_string;
match self.tcx.map.as_local_node_id(def_id) {
Some(node_id) => CachedMir::Ref(self.mir_map.map.get(&node_id).unwrap()),
None => {
let mut mir_cache = self.mir_cache.borrow_mut();
if let Some(mir) = mir_cache.get(&def_id) {
return CachedMir::Owned(mir.clone());
}
let cs = &self.tcx.sess.cstore;
let mir = cs.maybe_get_item_mir(self.tcx, def_id).unwrap_or_else(|| {
panic!("no mir for `{}`", def_id_to_string(self.tcx, def_id));
});
let cached = Rc::new(mir);
mir_cache.insert(def_id, cached.clone());
CachedMir::Owned(cached)
}
}
}
fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
let substituted = ty.subst(self.tcx, substs);
self.tcx.normalize_associated_type(&substituted)
}
fn type_size(&self, ty: Ty<'tcx>) -> usize {
self.type_size_with_substs(ty, self.substs())
}
fn type_size_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> usize {
self.type_layout_with_substs(ty, substs).size(&self.tcx.data_layout).bytes() as usize
}
fn type_layout(&self, ty: Ty<'tcx>) -> &'tcx Layout {
self.type_layout_with_substs(ty, self.substs())
}
fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> &'tcx Layout {
// TODO(solson): Is this inefficient? Needs investigation.
let ty = self.monomorphize(ty, substs);
self.tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
// TODO(solson): Report this error properly.
ty.layout(&infcx).unwrap()
})
}
pub fn push_stack_frame(
&mut self,
def_id: DefId,
span: codemap::Span,
mir: CachedMir<'a, 'tcx>,
substs: &'tcx Substs<'tcx>,
return_ptr: Option<Pointer>,
) {
let arg_tys = mir.arg_decls.iter().map(|a| a.ty);
let var_tys = mir.var_decls.iter().map(|v| v.ty);
let temp_tys = mir.temp_decls.iter().map(|t| t.ty);
let num_args = mir.arg_decls.len();
let num_vars = mir.var_decls.len();
::log_settings::settings().indentation += 1;
let locals: Vec<Pointer> = arg_tys.chain(var_tys).chain(temp_tys).map(|ty| {
let size = self.type_size_with_substs(ty, substs);
self.memory.allocate(size)
}).collect();
self.stack.push(Frame {
mir: mir.clone(),
block: mir::START_BLOCK,
return_ptr: return_ptr,
locals: locals,
var_offset: num_args,
temp_offset: num_args + num_vars,
span: span,
def_id: def_id,
substs: substs,
stmt: 0,
});
}
fn pop_stack_frame(&mut self) {
::log_settings::settings().indentation -= 1;
let _frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
// TODO(solson): Deallocate local variables.
}
fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>)
-> EvalResult<'tcx, ()> {
use rustc::mir::repr::TerminatorKind::*;
match terminator.kind {
Return => self.pop_stack_frame(),
Goto { target } => {
self.frame_mut().block = target;
},
If { ref cond, targets: (then_target, else_target) } => {
let cond_ptr = self.eval_operand(cond)?;
let cond_val = self.memory.read_bool(cond_ptr)?;
self.frame_mut().block = if cond_val { then_target } else { else_target };
}
SwitchInt { ref discr, ref values, ref targets, .. } => {
let discr_ptr = self.eval_lvalue(discr)?.to_ptr();
let discr_size = self
.type_layout(self.lvalue_ty(discr))
.size(&self.tcx.data_layout)
.bytes() as usize;
let discr_val = self.memory.read_uint(discr_ptr, discr_size)?;
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets[targets.len() - 1];
for (index, val_const) in values.iter().enumerate() {
let ptr = self.const_to_ptr(val_const)?;
let val = self.memory.read_uint(ptr, discr_size)?;
if discr_val == val {
target_block = targets[index];
break;
}
}
self.frame_mut().block = target_block;
}
Switch { ref discr, ref targets, adt_def } => {
let adt_ptr = self.eval_lvalue(discr)?.to_ptr();
let adt_ty = self.lvalue_ty(discr);
let discr_val = self.read_discriminant_value(adt_ptr, adt_ty)?;
let matching = adt_def.variants.iter()
.position(|v| discr_val == v.disr_val.to_u64_unchecked());
match matching {
Some(i) => {
self.frame_mut().block = targets[i];
},
None => return Err(EvalError::InvalidDiscriminant),
}
}
Call { ref func, ref args, ref destination, .. } => {
let mut return_ptr = None;
if let Some((ref lv, target)) = *destination {
self.frame_mut().block = target;
return_ptr = Some(self.eval_lvalue(lv)?.to_ptr());
}
let func_ty = self.operand_ty(func);
match func_ty.sty {
ty::TyFnPtr(bare_fn_ty) => {
let ptr = self.eval_operand(func)?;
assert_eq!(ptr.offset, 0);
let fn_ptr = self.memory.read_ptr(ptr)?;
let FunctionDefinition { def_id, substs, fn_ty } = self.memory.get_fn(fn_ptr.alloc_id)?;
if fn_ty != bare_fn_ty {
return Err(EvalError::FunctionPointerTyMismatch(fn_ty, bare_fn_ty));
}
self.eval_fn_call(def_id, substs, bare_fn_ty, return_ptr, args,
terminator.source_info.span)?
},
ty::TyFnDef(def_id, substs, fn_ty) => {
self.eval_fn_call(def_id, substs, fn_ty, return_ptr, args,
terminator.source_info.span)?
}
_ => return Err(EvalError::Unimplemented(format!("can't handle callee of type {:?}", func_ty))),
}
}
Drop { ref location, target, .. } => {
let ptr = self.eval_lvalue(location)?.to_ptr();
let ty = self.lvalue_ty(location);
self.drop(ptr, ty)?;
self.frame_mut().block = target;
}
Assert { ref cond, expected, ref msg, target, cleanup } => {
let actual_ptr = self.eval_operand(cond)?;
let actual = self.memory.read_bool(actual_ptr)?;
if actual == expected {
self.frame_mut().block = target;
} else {
panic!("unimplemented: jump to {:?} and print {:?}", cleanup, msg);
}
}
DropAndReplace { .. } => unimplemented!(),
Resume => unimplemented!(),
Unreachable => unimplemented!(),
}
Ok(())
}
pub fn eval_fn_call(
&mut self,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
fn_ty: &'tcx BareFnTy,
return_ptr: Option<Pointer>,
args: &[mir::Operand<'tcx>],
span: Span,
) -> EvalResult<'tcx, ()> {
use syntax::abi::Abi;
match fn_ty.abi {
Abi::RustIntrinsic => {
let name = self.tcx.item_name(def_id).as_str();
match fn_ty.sig.0.output {
ty::FnConverging(ty) => {
let size = self.type_size(ty);
let ret = return_ptr.unwrap();
self.call_intrinsic(&name, substs, args, ret, size)
}
ty::FnDiverging => unimplemented!(),
}
}
Abi::C => {
match fn_ty.sig.0.output {
ty::FnConverging(ty) => {
let size = self.type_size(ty);
self.call_c_abi(def_id, args, return_ptr.unwrap(), size)
}
ty::FnDiverging => unimplemented!(),
}
}
Abi::Rust | Abi::RustCall => {
// TODO(solson): Adjust the first argument when calling a Fn or
// FnMut closure via FnOnce::call_once.
// Only trait methods can have a Self parameter.
let (resolved_def_id, resolved_substs) = if substs.self_ty().is_some() {
self.trait_method(def_id, substs)
} else {
(def_id, substs)
};
let mut arg_srcs = Vec::new();
for arg in args {
let src = self.eval_operand(arg)?;
let src_ty = self.operand_ty(arg);
arg_srcs.push((src, src_ty));
}
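// For the "rust-call" ABI the last operand is a tuple packing the remaining arguments
// (closures receive their arguments this way), so replace it with one argument source
// per tuple field, using the field offsets from the tuple's layout.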
if fn_ty.abi == Abi::RustCall && !args.is_empty() {
arg_srcs.pop();
let last_arg = args.last().unwrap();
let last = self.eval_operand(last_arg)?;
let last_ty = self.operand_ty(last_arg);
let last_layout = self.type_layout(last_ty);
match (&last_ty.sty, last_layout) {
(&ty::TyTuple(fields),
&Layout::Univariant { ref variant, .. }) => {
let offsets = iter::once(0)
.chain(variant.offset_after_field.iter()
.map(|s| s.bytes()));
for (offset, ty) in offsets.zip(fields) {
let src = last.offset(offset as isize);
arg_srcs.push((src, ty));
}
}
ty => panic!("expected tuple as last argument in function with 'rust-call' ABI, got {:?}", ty),
}
}
let mir = self.load_mir(resolved_def_id);
self.push_stack_frame(def_id, span, mir, resolved_substs, return_ptr);
for (i, (src, src_ty)) in arg_srcs.into_iter().enumerate() {
let dest = self.frame().locals[i];
self.move_(src, dest, src_ty)?;
}
Ok(())
}
abi => Err(EvalError::Unimplemented(format!("can't handle function with {:?} ABI", abi))),
}
}
fn drop(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, ()> {
if !self.type_needs_drop(ty) {
debug!("no need to drop {:?}", ty);
return Ok(());
}
trace!("-need to drop {:?}", ty);
// TODO(solson): Call user-defined Drop::drop impls.
match ty.sty {
ty::TyBox(contents_ty) => {
match self.memory.read_ptr(ptr) {
Ok(contents_ptr) => {
self.drop(contents_ptr, contents_ty)?;
trace!("-deallocating box");
self.memory.deallocate(contents_ptr)?;
}
Err(EvalError::ReadBytesAsPointer) => {
let size = self.memory.pointer_size;
let possible_drop_fill = self.memory.read_bytes(ptr, size)?;
if possible_drop_fill.iter().all(|&b| b == mem::POST_DROP_U8) {
return Ok(());
} else {
return Err(EvalError::ReadBytesAsPointer);
}
}
Err(e) => return Err(e),
}
}
// TODO(solson): Implement drop for other relevant types (e.g. aggregates).
_ => {}
}
// Filling drop.
// FIXME(solson): Trait objects (with no static size) probably get filled, too.
let size = self.type_size(ty);
self.memory.drop_fill(ptr, size)?;
Ok(())
}
fn read_discriminant_value(&self, adt_ptr: Pointer, adt_ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
use rustc::ty::layout::Layout::*;
let adt_layout = self.type_layout(adt_ty);
let discr_val = match *adt_layout {
General { discr, .. } | CEnum { discr, .. } => {
let discr_size = discr.size().bytes();
self.memory.read_uint(adt_ptr, discr_size as usize)?
}
RawNullablePointer { nndiscr, .. } => {
self.read_nonnull_discriminant_value(adt_ptr, nndiscr)?
}
StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
let offset = self.nonnull_offset(adt_ty, nndiscr, discrfield)?;
let nonnull = adt_ptr.offset(offset.bytes() as isize);
self.read_nonnull_discriminant_value(nonnull, nndiscr)?
}
// The discriminant_value intrinsic returns 0 for non-sum types.
Array { .. } | FatPointer { .. } | Scalar { .. } | Univariant { .. } |
Vector { .. } => 0,
};
Ok(discr_val)
}
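// Niche encoding used by the nullable-pointer layouts: variant `nndiscr` is the one that
// holds a non-null pointer, so reading 0 here selects the other variant (`1 - nndiscr`),
// as in an `Option<&T>`-style enum.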
fn read_nonnull_discriminant_value(&self, ptr: Pointer, nndiscr: u64) -> EvalResult<'tcx, u64> {
let not_null = match self.memory.read_usize(ptr) {
Ok(0) => false,
Ok(_) | Err(EvalError::ReadPointerAsBytes) => true,
Err(e) => return Err(e),
};
assert!(nndiscr == 0 || nndiscr == 1);
Ok(if not_null { nndiscr } else { 1 - nndiscr })
}
fn call_intrinsic(
&mut self,
name: &str,
substs: &'tcx Substs<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Pointer,
dest_size: usize
) -> EvalResult<'tcx, ()> {
let args_res: EvalResult<Vec<Pointer>> = args.iter()
.map(|arg| self.eval_operand(arg))
.collect();
let args = args_res?;
match name {
// FIXME(solson): Handle different integer types correctly.
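// The destination of `add_with_overflow` is a (result, overflow flag) pair: the integer
// result goes at offset 0 and the `bool` flag directly after it, at offset `size`.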
"add_with_overflow" => {
let ty = *substs.types.get(subst::FnSpace, 0);
let size = self.type_size(ty);
let left = self.memory.read_int(args[0], size)?;
let right = self.memory.read_int(args[1], size)?;
let (n, overflowed) = unsafe {
::std::intrinsics::add_with_overflow::<i64>(left, right)
};
self.memory.write_int(dest, n, size)?;
self.memory.write_bool(dest.offset(size as isize), overflowed)?;
}
"assume" => {}
"copy_nonoverlapping" => {
let elem_ty = *substs.types.get(subst::FnSpace, 0);
let elem_size = self.type_size(elem_ty);
let src = self.memory.read_ptr(args[0])?;
let dest = self.memory.read_ptr(args[1])?;
let count = self.memory.read_isize(args[2])?;
self.memory.copy(src, dest, count as usize * elem_size)?;
}
"discriminant_value" => {
let ty = *substs.types.get(subst::FnSpace, 0);
let adt_ptr = self.memory.read_ptr(args[0])?;
let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
self.memory.write_uint(dest, discr_val, dest_size)?;
}
"forget" => {
let arg_ty = *substs.types.get(subst::FnSpace, 0);
let arg_size = self.type_size(arg_ty);
self.memory.drop_fill(args[0], arg_size)?;
}
"init" => self.memory.write_repeat(dest, 0, dest_size)?,
"min_align_of" => {
self.memory.write_int(dest, 1, dest_size)?;
}
"move_val_init" => {
let ty = *substs.types.get(subst::FnSpace, 0);
let ptr = self.memory.read_ptr(args[0])?;
self.move_(args[1], ptr, ty)?;
}
// FIXME(solson): Handle different integer types correctly.
"mul_with_overflow" => {
let ty = *substs.types.get(subst::FnSpace, 0);
let size = self.type_size(ty);
let left = self.memory.read_int(args[0], size)?;
let right = self.memory.read_int(args[1], size)?;
let (n, overflowed) = unsafe {
::std::intrinsics::mul_with_overflow::<i64>(left, right)
};
self.memory.write_int(dest, n, size)?;
self.memory.write_bool(dest.offset(size as isize), overflowed)?;
}
"offset" => {
let pointee_ty = *substs.types.get(subst::FnSpace, 0);
let pointee_size = self.type_size(pointee_ty) as isize;
let ptr_arg = args[0];
let offset = self.memory.read_isize(args[1])?;
match self.memory.read_ptr(ptr_arg) {
Ok(ptr) => {
let result_ptr = ptr.offset(offset as isize * pointee_size);
self.memory.write_ptr(dest, result_ptr)?;
}
Err(EvalError::ReadBytesAsPointer) => {
let addr = self.memory.read_isize(ptr_arg)?;
let result_addr = addr + offset * pointee_size as i64;
self.memory.write_isize(dest, result_addr)?;
}
Err(e) => return Err(e),
}
}
// FIXME(solson): Handle different integer types correctly. Use primvals?
"overflowing_sub" => {
let ty = *substs.types.get(subst::FnSpace, 0);
let size = self.type_size(ty);
let left = self.memory.read_int(args[0], size)?;
let right = self.memory.read_int(args[1], size)?;
let n = left.wrapping_sub(right);
self.memory.write_int(dest, n, size)?;
}
"size_of" => {
let ty = *substs.types.get(subst::FnSpace, 0);
let size = self.type_size(ty) as u64;
self.memory.write_uint(dest, size, dest_size)?;
}
"size_of_val" => {
let ty = *substs.types.get(subst::FnSpace, 0);
if self.type_is_sized(ty) {
let size = self.type_size(ty) as u64;
self.memory.write_uint(dest, size, dest_size)?;
} else {
match ty.sty {
ty::TySlice(_) | ty::TyStr => {
let elem_ty = ty.sequence_element_type(self.tcx);
let elem_size = self.type_size(elem_ty) as u64;
let ptr_size = self.memory.pointer_size as isize;
let n = self.memory.read_usize(args[0].offset(ptr_size))?;
self.memory.write_uint(dest, n * elem_size, dest_size)?;
}
_ => return Err(EvalError::Unimplemented(format!("unimplemented: size_of_val::<{:?}>", ty))),
}
}
}
"transmute" => {
let ty = *substs.types.get(subst::FnSpace, 0);
self.move_(args[0], dest, ty)?;
}
"uninit" => self.memory.mark_definedness(dest, dest_size, false)?,
name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
}
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
Ok(())
}
fn call_c_abi(
&mut self,
def_id: DefId,
args: &[mir::Operand<'tcx>],
dest: Pointer,
dest_size: usize,
) -> EvalResult<'tcx, ()> {
let name = self.tcx.item_name(def_id);
let attrs = self.tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(ln) => ln.clone(),
None => name.as_str(),
};
let args_res: EvalResult<Vec<Pointer>> = args.iter()
.map(|arg| self.eval_operand(arg))
.collect();
let args = args_res?;
match &link_name[..] {
"__rust_allocate" => {
let size = self.memory.read_usize(args[0])?;
let ptr = self.memory.allocate(size as usize);
self.memory.write_ptr(dest, ptr)?;
}
"__rust_reallocate" => {
let ptr = self.memory.read_ptr(args[0])?;
let size = self.memory.read_usize(args[2])?;
self.memory.reallocate(ptr, size as usize)?;
self.memory.write_ptr(dest, ptr)?;
}
"memcmp" => {
let left = self.memory.read_ptr(args[0])?;
let right = self.memory.read_ptr(args[1])?;
let n = self.memory.read_usize(args[2])? as usize;
let result = {
let left_bytes = self.memory.read_bytes(left, n)?;
let right_bytes = self.memory.read_bytes(right, n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
Less => -1,
Equal => 0,
Greater => 1,
}
};
self.memory.write_int(dest, result, dest_size)?;
}
_ => {
return Err(EvalError::Unimplemented(format!("can't call C ABI function: {}", link_name)));
}
}
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
Ok(())
}
fn assign_fields<I: IntoIterator<Item = u64>>(
&mut self,
dest: Pointer,
offsets: I,
operands: &[mir::Operand<'tcx>],
) -> EvalResult<'tcx, ()> {
for (offset, operand) in offsets.into_iter().zip(operands) {
let src = self.eval_operand(operand)?;
let src_ty = self.operand_ty(operand);
let field_dest = dest.offset(offset as isize);
self.move_(src, field_dest, src_ty)?;
}
Ok(())
}
fn eval_assignment(&mut self, lvalue: &mir::Lvalue<'tcx>, rvalue: &mir::Rvalue<'tcx>)
-> EvalResult<'tcx, ()>
{
let dest = self.eval_lvalue(lvalue)?.to_ptr();
let dest_ty = self.lvalue_ty(lvalue);
let dest_layout = self.type_layout(dest_ty);
use rustc::mir::repr::Rvalue::*;
match *rvalue {
Use(ref operand) => {
let src = self.eval_operand(operand)?;
self.move_(src, dest, dest_ty)?;
}
BinaryOp(bin_op, ref left, ref right) => {
let left_ptr = self.eval_operand(left)?;
let left_ty = self.operand_ty(left);
let left_val = self.read_primval(left_ptr, left_ty)?;
let right_ptr = self.eval_operand(right)?;
let right_ty = self.operand_ty(right);
let right_val = self.read_primval(right_ptr, right_ty)?;
let val = primval::binary_op(bin_op, left_val, right_val)?;
self.memory.write_primval(dest, val)?;
}
// FIXME(solson): Factor this out with BinaryOp.
CheckedBinaryOp(bin_op, ref left, ref right) => {
let left_ptr = self.eval_operand(left)?;
let left_ty = self.operand_ty(left);
let left_val = self.read_primval(left_ptr, left_ty)?;
let right_ptr = self.eval_operand(right)?;
let right_ty = self.operand_ty(right);
let right_val = self.read_primval(right_ptr, right_ty)?;
let val = primval::binary_op(bin_op, left_val, right_val)?;
self.memory.write_primval(dest, val)?;
// FIXME(solson): Find the result type size properly. Perhaps refactor out
// Projection calculations so we can do the equivalent of `dest.1` here.
let s = self.type_size(left_ty);
self.memory.write_bool(dest.offset(s as isize), false)?;
}
UnaryOp(un_op, ref operand) => {
let ptr = self.eval_operand(operand)?;
let ty = self.operand_ty(operand);
let val = self.read_primval(ptr, ty)?;
self.memory.write_primval(dest, primval::unary_op(un_op, val)?)?;
}
Aggregate(ref kind, ref operands) => {
use rustc::ty::layout::Layout::*;
match *dest_layout {
Univariant { ref variant, .. } => {
let offsets = iter::once(0)
.chain(variant.offset_after_field.iter().map(|s| s.bytes()));
self.assign_fields(dest, offsets, operands)?;
}
Array { .. } => {
let elem_size = match dest_ty.sty {
ty::TyArray(elem_ty, _) => self.type_size(elem_ty) as u64,
_ => panic!("tried to assign {:?} to non-array type {:?}",
kind, dest_ty),
};
let offsets = (0..).map(|i| i * elem_size);
self.assign_fields(dest, offsets, operands)?;
}
General { discr, ref variants, .. } => {
if let mir::AggregateKind::Adt(adt_def, variant, _) = *kind {
let discr_val = adt_def.variants[variant].disr_val.to_u64_unchecked();
let discr_size = discr.size().bytes() as usize;
self.memory.write_uint(dest, discr_val, discr_size)?;
let offsets = variants[variant].offset_after_field.iter()
.map(|s| s.bytes());
self.assign_fields(dest, offsets, operands)?;
} else {
panic!("tried to assign {:?} to Layout::General", kind);
}
}
RawNullablePointer { nndiscr, .. } => {
if let mir::AggregateKind::Adt(_, variant, _) = *kind {
if nndiscr == variant as u64 {
assert_eq!(operands.len(), 1);
let operand = &operands[0];
let src = self.eval_operand(operand)?;
let src_ty = self.operand_ty(operand);
self.move_(src, dest, src_ty)?;
} else {
assert_eq!(operands.len(), 0);
self.memory.write_isize(dest, 0)?;
}
} else {
panic!("tried to assign {:?} to Layout::RawNullablePointer", kind);
}
}
StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield } => {
if let mir::AggregateKind::Adt(_, variant, _) = *kind {
if nndiscr == variant as u64 {
let offsets = iter::once(0)
.chain(nonnull.offset_after_field.iter().map(|s| s.bytes()));
self.assign_fields(dest, offsets, operands)?;
} else {
assert_eq!(operands.len(), 0);
let offset = self.nonnull_offset(dest_ty, nndiscr, discrfield)?;
let dest = dest.offset(offset.bytes() as isize);
self.memory.write_isize(dest, 0)?;
}
} else {
panic!("tried to assign {:?} to Layout::StructWrappedNullablePointer", kind);
}
}
CEnum { discr, signed, .. } => {
assert_eq!(operands.len(), 0);
if let mir::AggregateKind::Adt(adt_def, variant, _) = *kind {
let val = adt_def.variants[variant].disr_val.to_u64_unchecked();
let size = discr.size().bytes() as usize;
if signed {
self.memory.write_int(dest, val as i64, size)?;
} else {
self.memory.write_uint(dest, val, size)?;
}
} else {
panic!("tried to assign {:?} to Layout::CEnum", kind);
}
}
_ => return Err(EvalError::Unimplemented(format!("can't handle destination layout {:?} when assigning {:?}", dest_layout, kind))),
}
}
Repeat(ref operand, _) => {
let (elem_size, length) = match dest_ty.sty {
ty::TyArray(elem_ty, n) => (self.type_size(elem_ty), n),
_ => panic!("tried to assign array-repeat to non-array type {:?}", dest_ty),
};
let src = self.eval_operand(operand)?;
for i in 0..length {
let elem_dest = dest.offset((i * elem_size) as isize);
self.memory.copy(src, elem_dest, elem_size)?;
}
}
Len(ref lvalue) => {
let src = self.eval_lvalue(lvalue)?;
let ty = self.lvalue_ty(lvalue);
let len = match ty.sty {
ty::TyArray(_, n) => n as u64,
ty::TySlice(_) => if let LvalueExtra::Length(n) = src.extra {
n
} else {
panic!("Rvalue::Len of a slice given non-slice pointer: {:?}", src);
},
_ => panic!("Rvalue::Len expected array or slice, got {:?}", ty),
};
self.memory.write_usize(dest, len)?;
}
Ref(_, _, ref lvalue) => {
let lv = self.eval_lvalue(lvalue)?;
self.memory.write_ptr(dest, lv.ptr)?;
match lv.extra {
LvalueExtra::None => {},
LvalueExtra::Length(len) => {
let len_ptr = dest.offset(self.memory.pointer_size as isize);
self.memory.write_usize(len_ptr, len)?;
}
LvalueExtra::DowncastVariant(..) =>
panic!("attempted to take a reference to an enum downcast lvalue"),
}
}
Box(ty) => {
let size = self.type_size(ty);
let ptr = self.memory.allocate(size);
self.memory.write_ptr(dest, ptr)?;
}
Cast(kind, ref operand, dest_ty) => {
use rustc::mir::repr::CastKind::*;
match kind {
Unsize => {
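// Only array-to-slice unsizing is handled here: copy the thin pointer, then write the
// length word after it so `dest` ends up holding a fat pointer. Any other unsize cast
// falls through to the `Unimplemented` error below.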
let src = self.eval_operand(operand)?;
let src_ty = self.operand_ty(operand);
self.move_(src, dest, src_ty)?;
let src_pointee_ty = pointee_type(src_ty).unwrap();
let dest_pointee_ty = pointee_type(dest_ty).unwrap();
match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
(&ty::TyArray(_, length), &ty::TySlice(_)) => {
let len_ptr = dest.offset(self.memory.pointer_size as isize);
self.memory.write_usize(len_ptr, length as u64)?;
}
_ => return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue))),
}
}
Misc => {
let src = self.eval_operand(operand)?;
let src_ty = self.operand_ty(operand);
// FIXME(solson): Wrong for almost everything.
warn!("misc cast from {:?} to {:?}", src_ty, dest_ty);
let dest_size = self.type_size(dest_ty);
let src_size = self.type_size(src_ty);
// Hack to support fat pointer -> thin pointer casts to keep tests for
// other things passing for now.
let is_fat_ptr_cast = pointee_type(src_ty).map(|ty| {
!self.type_is_sized(ty)
}).unwrap_or(false);
if dest_size == src_size || is_fat_ptr_cast {
self.memory.copy(src, dest, dest_size)?;
} else {
return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue)));
}
}
ReifyFnPointer => match self.operand_ty(operand).sty {
ty::TyFnDef(def_id, substs, fn_ty) => {
let fn_ptr = self.memory.create_fn_ptr(def_id, substs, fn_ty);
self.memory.write_ptr(dest, fn_ptr)?;
},
ref other => panic!("reify fn pointer on {:?}", other),
},
_ => return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue))),
}
}
InlineAsm { .. } => unimplemented!(),
}
Ok(())
}
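// `discrfield` is a GEP-style field path (a leading constant 0 followed by field indices)
// that leads to the pointer field whose null/non-null state encodes the discriminant of a
// `StructWrappedNullablePointer` enum.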
fn nonnull_offset(&self, ty: Ty<'tcx>, nndiscr: u64, discrfield: &[u32]) -> EvalResult<'tcx, Size> {
// Skip the constant 0 at the start meant for LLVM GEP.
let mut path = discrfield.iter().skip(1).map(|&i| i as usize);
// Handle the field index for the outer non-null variant.
let inner_ty = match ty.sty {
ty::TyEnum(adt_def, substs) => {
let variant = &adt_def.variants[nndiscr as usize];
let index = path.next().unwrap();
let field = &variant.fields[index];
field.ty(self.tcx, substs)
}
_ => panic!(
"non-enum for StructWrappedNullablePointer: {}",
ty,
),
};
self.field_path_offset(inner_ty, path)
}
fn field_path_offset<I: Iterator<Item = usize>>(&self, mut ty: Ty<'tcx>, path: I) -> EvalResult<'tcx, Size> {
let mut offset = Size::from_bytes(0);
// The caller has already skipped the leading 0 meant for LLVM GEP; walk the remaining field path.
for field_index in path {
let field_offset = self.get_field_offset(ty, field_index)?;
ty = self.get_field_ty(ty, field_index)?;
offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap();
}
Ok(offset)
}
fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> {
match ty.sty {
ty::TyStruct(adt_def, substs) => {
Ok(adt_def.struct_variant().fields[field_index].ty(self.tcx, substs))
}
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
ty::TyBox(ty) => {
assert_eq!(field_index, 0);
Ok(ty)
}
_ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}", ty))),
}
}
fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
let layout = self.type_layout(ty);
use rustc::ty::layout::Layout::*;
match *layout {
Univariant { .. } => {
assert_eq!(field_index, 0);
Ok(Size::from_bytes(0))
}
FatPointer { .. } => {
let bytes = layout::FAT_PTR_ADDR * self.memory.pointer_size;
Ok(Size::from_bytes(bytes as u64))
}
_ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, with layout: {:?}", ty, layout))),
}
}
fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Pointer> {
use rustc::mir::repr::Operand::*;
match *op {
Consume(ref lvalue) => Ok(self.eval_lvalue(lvalue)?.to_ptr()),
Constant(mir::Constant { ref literal, ty, .. }) => {
use rustc::mir::repr::Literal::*;
match *literal {
Value { ref value } => Ok(self.const_to_ptr(value)?),
Item { def_id, substs } => {
if let ty::TyFnDef(..) = ty.sty {
// function items are zero sized
Ok(self.memory.allocate(0))
} else {
let cid = ConstantId {
def_id: def_id,
substs: substs,
kind: ConstantKind::Global,
};
Ok(*self.statics.get(&cid).expect("static should have been cached (rvalue)"))
}
},
Promoted { index } => {
let cid = ConstantId {
def_id: self.frame().def_id,
substs: self.substs(),
kind: ConstantKind::Promoted(index),
};
Ok(*self.statics.get(&cid).expect("a promoted constant hasn't been precomputed"))
},
}
}
}
}
fn eval_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> {
use rustc::mir::repr::Lvalue::*;
let ptr = match *lvalue {
ReturnPointer => self.frame().return_ptr
.expect("ReturnPointer used in a function with no return value"),
Arg(i) => self.frame().locals[i.index()],
Var(i) => self.frame().locals[self.frame().var_offset + i.index()],
Temp(i) => self.frame().locals[self.frame().temp_offset + i.index()],
Static(def_id) => {
let substs = self.tcx.mk_substs(subst::Substs::empty());
let cid = ConstantId {
def_id: def_id,
substs: substs,
kind: ConstantKind::Global,
};
*self.statics.get(&cid).expect("static should have been cached (lvalue)")
},
Projection(ref proj) => {
let base = self.eval_lvalue(&proj.base)?;
let base_ty = self.lvalue_ty(&proj.base);
let base_layout = self.type_layout(base_ty);
use rustc::mir::repr::ProjectionElem::*;
match proj.elem {
Field(field, _) => {
use rustc::ty::layout::Layout::*;
let variant = match *base_layout {
Univariant { ref variant, .. } => variant,
General { ref variants, .. } => {
if let LvalueExtra::DowncastVariant(variant_idx) = base.extra {
&variants[variant_idx]
} else {
panic!("field access on enum had no variant index");
}
}
RawNullablePointer { .. } => {
assert_eq!(field.index(), 0);
return Ok(base);
}
StructWrappedNullablePointer { ref nonnull, .. } => nonnull,
_ => panic!("field access on non-product type: {:?}", base_layout),
};
let offset = variant.field_offset(field.index()).bytes();
base.ptr.offset(offset as isize)
},
Downcast(_, variant) => {
use rustc::ty::layout::Layout::*;
match *base_layout {
General { discr, .. } => {
return Ok(Lvalue {
ptr: base.ptr.offset(discr.size().bytes() as isize),
extra: LvalueExtra::DowncastVariant(variant),
});
}
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
return Ok(base);
}
_ => panic!("variant downcast on non-aggregate: {:?}", base_layout),
}
},
Deref => {
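// Dereferencing reads the target pointer out of memory; for unsized pointees (slices and
// `str`) the length word stored after the data pointer is carried along as
// `LvalueExtra::Length`.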
let pointee_ty = pointee_type(base_ty).expect("Deref of non-pointer");
let ptr = self.memory.read_ptr(base.ptr)?;
let extra = match pointee_ty.sty {
ty::TySlice(_) | ty::TyStr => {
let len_ptr = base.ptr.offset(self.memory.pointer_size as isize);
let len = self.memory.read_usize(len_ptr)?;
LvalueExtra::Length(len)
}
ty::TyTrait(_) => unimplemented!(),
_ => LvalueExtra::None,
};
return Ok(Lvalue { ptr: ptr, extra: extra });
}
Index(ref operand) => {
let elem_size = match base_ty.sty {
ty::TyArray(elem_ty, _) |
ty::TySlice(elem_ty) => self.type_size(elem_ty),
_ => panic!("indexing expected an array or slice, got {:?}", base_ty),
};
let n_ptr = self.eval_operand(operand)?;
let n = self.memory.read_usize(n_ptr)?;
base.ptr.offset(n as isize * elem_size as isize)
}
ConstantIndex { .. } => unimplemented!(),
Subslice { .. } => unimplemented!(),
}
}
};
Ok(Lvalue { ptr: ptr, extra: LvalueExtra::None })
}
fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
self.monomorphize(self.mir().lvalue_ty(self.tcx, lvalue).to_ty(self.tcx), self.substs())
}
fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
self.monomorphize(self.mir().operand_ty(self.tcx, operand), self.substs())
}
fn move_(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, ()> {
let size = self.type_size(ty);
self.memory.copy(src, dest, size)?;
if self.type_needs_drop(ty) {
self.memory.drop_fill(src, size)?;
}
Ok(())
}
pub fn read_primval(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
use syntax::ast::{IntTy, UintTy};
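// `isize`/`usize` have no dedicated `PrimVal` variant, so they are read as the fixed-width
// integer whose size matches the target pointer size (the first element of the tuple being
// matched on).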
let val = match (self.memory.pointer_size, &ty.sty) {
(_, &ty::TyBool) => PrimVal::Bool(self.memory.read_bool(ptr)?),
(_, &ty::TyInt(IntTy::I8)) => PrimVal::I8(self.memory.read_int(ptr, 1)? as i8),
(2, &ty::TyInt(IntTy::Is)) |
(_, &ty::TyInt(IntTy::I16)) => PrimVal::I16(self.memory.read_int(ptr, 2)? as i16),
(4, &ty::TyInt(IntTy::Is)) |
(_, &ty::TyInt(IntTy::I32)) => PrimVal::I32(self.memory.read_int(ptr, 4)? as i32),
(8, &ty::TyInt(IntTy::Is)) |
(_, &ty::TyInt(IntTy::I64)) => PrimVal::I64(self.memory.read_int(ptr, 8)? as i64),
(_, &ty::TyUint(UintTy::U8)) => PrimVal::U8(self.memory.read_uint(ptr, 1)? as u8),
(2, &ty::TyUint(UintTy::Us)) |
(_, &ty::TyUint(UintTy::U16)) => PrimVal::U16(self.memory.read_uint(ptr, 2)? as u16),
(4, &ty::TyUint(UintTy::Us)) |
(_, &ty::TyUint(UintTy::U32)) => PrimVal::U32(self.memory.read_uint(ptr, 4)? as u32),
(8, &ty::TyUint(UintTy::Us)) |
(_, &ty::TyUint(UintTy::U64)) => PrimVal::U64(self.memory.read_uint(ptr, 8)? as u64),
(_, &ty::TyRef(_, ty::TypeAndMut { ty, .. })) |
(_, &ty::TyRawPtr(ty::TypeAndMut { ty, .. })) => {
if self.type_is_sized(ty) {
match self.memory.read_ptr(ptr) {
Ok(p) => PrimVal::AbstractPtr(p),
Err(EvalError::ReadBytesAsPointer) => {
PrimVal::IntegerPtr(self.memory.read_usize(ptr)?)
}
Err(e) => return Err(e),
}
} else {
return Err(EvalError::Unimplemented(format!("unimplemented: primitive read of fat pointer type: {:?}", ty)));
}
}
_ => panic!("primitive read of non-primitive type: {:?}", ty),
};
Ok(val)
}
fn frame(&self) -> &Frame<'a, 'tcx> {
self.stack.last().expect("no call frames exist")
}
pub fn frame_mut(&mut self) -> &mut Frame<'a, 'tcx> {
self.stack.last_mut().expect("no call frames exist")
}
fn mir(&self) -> CachedMir<'a, 'tcx> {
self.frame().mir.clone()
}
fn substs(&self) -> &'tcx Substs<'tcx> {
self.frame().substs
}
}
fn pointee_type(ptr_ty: ty::Ty) -> Option<ty::Ty> {
match ptr_ty.sty {
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
ty::TyBox(ty) => {
Some(ty)
}
_ => None,
}
}
impl Lvalue {
fn to_ptr(self) -> Pointer {
assert_eq!(self.extra, LvalueExtra::None);
self.ptr
}
}
impl<'mir, 'tcx: 'mir> Deref for CachedMir<'mir, 'tcx> {
type Target = mir::Mir<'tcx>;
fn deref(&self) -> &mir::Mir<'tcx> {
match *self {
CachedMir::Ref(r) => r,
CachedMir::Owned(ref rc) => rc,
}
}
}
#[derive(Debug)]
pub struct ImplMethod<'tcx> {
pub method: Rc<ty::Method<'tcx>>,
pub substs: &'tcx Substs<'tcx>,
pub is_provided: bool,
}
/// Locates the applicable definition of a method, given its name.
pub fn get_impl_method<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_def_id: DefId,
substs: &'tcx Substs<'tcx>,
name: ast::Name,
) -> ImplMethod<'tcx> {
assert!(!substs.types.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
match trait_def.ancestors(impl_def_id).fn_defs(tcx, name).next() {
Some(node_item) => {
let substs = tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
let substs = traits::translate_substs(&infcx, impl_def_id,
substs, node_item.node);
tcx.lift(&substs).unwrap_or_else(|| {
bug!("trans::meth::get_impl_method: translate_substs \
returned {:?} which contains inference types/regions",
substs);
})
});
ImplMethod {
method: node_item.item,
substs: substs,
is_provided: node_item.node.is_from_trait(),
}
}
None => {
bug!("method {:?} not found in {:?}", name, impl_def_id)
}
}
}
// TODO(solson): Upstream these methods into rustc::ty::layout.
trait IntegerExt {
fn size(self) -> Size;
}
impl IntegerExt for layout::Integer {
fn size(self) -> Size {
use rustc::ty::layout::Integer::*;
match self {
I1 | I8 => Size::from_bits(8),
I16 => Size::from_bits(16),
I32 => Size::from_bits(32),
I64 => Size::from_bits(64),
}
}
}
trait StructExt {
fn field_offset(&self, index: usize) -> Size;
}
impl StructExt for layout::Struct {
fn field_offset(&self, index: usize) -> Size {
if index == 0 {
Size::from_bytes(0)
} else {
self.offset_after_field[index - 1]
}
}
}