Merge pull request #200 from oli-obk/master

Get rid of the integer allocation (Round 2)
This commit is contained in:
Oliver Schneider 2017-06-21 16:22:16 +02:00 committed by GitHub
commit d539fafaf6
27 changed files with 693 additions and 302 deletions

View File

@ -10,6 +10,11 @@ before_script:
- cargo install xargo
- export RUST_SYSROOT=$HOME/rust
script:
- |
# get ourselves a MIR-ful libstd
cd xargo &&
RUSTFLAGS='-Zalways-encode-mir' xargo build &&
cd ..
- |
# Test plain miri
cargo build &&
@ -22,11 +27,7 @@ script:
cargo miri test &&
cd ..
- |
# get ourselves a MIR-ful libstd
cd xargo &&
RUSTFLAGS='-Zalways-encode-mir' xargo build &&
cd .. &&
# and run the tests with it
# and run all tests with full mir
MIRI_SYSROOT=~/.xargo/HOST cargo test
notifications:
email:

View File

@ -3,7 +3,6 @@ use syntax::ast::{FloatTy, IntTy, UintTy};
use error::{EvalResult, EvalError};
use eval_context::EvalContext;
use memory::Pointer;
use value::PrimVal;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
@ -24,7 +23,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Bool | Char | U8 | U16 | U32 | U64 | U128 => self.cast_int(val.to_u128()?, dest_ty, false),
FnPtr | Ptr => self.cast_ptr(val.to_ptr()?, dest_ty),
FnPtr | Ptr => self.cast_ptr(val, dest_ty),
}
}
@ -71,7 +70,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)),
TyChar => Err(EvalError::InvalidChar(v)),
TyRawPtr(_) => Ok(PrimVal::Ptr(Pointer::from_int(v as u64))),
TyRawPtr(_) => Ok(PrimVal::Bytes(v % (1 << self.memory.pointer_size()))),
_ => Err(EvalError::Unimplemented(format!("int to {:?} cast", ty))),
}
@ -92,11 +91,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
fn cast_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
fn cast_ptr(&self, ptr: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
use rustc::ty::TypeVariants::*;
match ty.sty {
TyRef(..) | TyRawPtr(_) | TyFnPtr(_) | TyInt(_) | TyUint(_) =>
Ok(PrimVal::Ptr(ptr)),
Ok(ptr),
_ => Err(EvalError::Unimplemented(format!("ptr to {:?} cast", ty))),
}
}

View File

@ -22,6 +22,7 @@ pub enum EvalError<'tcx> {
allocation_size: u64,
},
ReadPointerAsBytes,
ReadBytesAsPointer,
InvalidPointerMath,
ReadUndefBytes,
DeadLocal,
@ -81,6 +82,8 @@ impl<'tcx> Error for EvalError<'tcx> {
"pointer offset outside bounds of allocation",
EvalError::ReadPointerAsBytes =>
"a raw memory access tried to access part of a pointer value as raw bytes",
EvalError::ReadBytesAsPointer =>
"a memory access tried to interpret some bytes as a pointer",
EvalError::InvalidPointerMath =>
"attempted to do math or a comparison on pointers into different allocations",
EvalError::ReadUndefBytes =>

View File

@ -17,7 +17,7 @@ use syntax::abi::Abi;
use error::{EvalError, EvalResult};
use lvalue::{Global, GlobalId, Lvalue, LvalueExtra};
use memory::{Memory, Pointer};
use memory::{Memory, Pointer, TlsKey};
use operator;
use value::{PrimVal, PrimValKind, Value};
@ -99,6 +99,11 @@ pub enum StackPopCleanup {
/// A regular stackframe added due to a function call will need to get forwarded to the next
/// block
Goto(mir::BasicBlock),
/// After finishing a tls destructor, find the next one instead of starting from the beginning
/// and thus just rerunning the first one until its `data` argument is null
///
/// The index is the current tls destructor's index
Tls(Option<TlsKey>),
/// The main function and diverging functions have nowhere to return to
None,
}
@ -350,6 +355,25 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
},
StackPopCleanup::Goto(target) => self.goto_block(target),
StackPopCleanup::None => {},
StackPopCleanup::Tls(key) => {
// either fetch the next dtor or start new from the beginning, if any are left with a non-null data
if let Some((instance, ptr, key)) = self.memory.fetch_tls_dtor(key).or_else(|| self.memory.fetch_tls_dtor(None)) {
trace!("Running TLS dtor {:?} on {:?}", instance, ptr);
// TODO: Potentially, this has to support all the other possible instances? See eval_fn_call in terminator/mod.rs
let mir = self.load_mir(instance.def)?;
self.push_stack_frame(
instance,
mir.span,
mir,
Lvalue::zst(),
StackPopCleanup::Tls(Some(key)),
)?;
let arg_local = self.frame().mir.args_iter().next().ok_or(EvalError::AbiViolation("TLS dtor does not take enough arguments.".to_owned()))?;
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
self.write_primval(dest, ptr, ty)?;
}
}
}
// deallocate all locals that are backed by an allocation
for local in frame.locals {
@ -389,13 +413,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
where J::IntoIter: ExactSizeIterator,
{
// FIXME(solson)
let dest_ptr = self.force_allocation(dest)?.to_ptr();
let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
let discr_dest = dest_ptr.offset(discr_offset, self.memory.layout)?;
self.memory.write_uint(discr_dest, discr_val, discr_size)?;
let dest = Lvalue::Ptr {
ptr: dest_ptr,
ptr: PrimVal::Ptr(dest_ptr),
extra: LvalueExtra::DowncastVariant(variant_idx),
};
@ -481,7 +505,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
match *dest_layout {
Univariant { ref variant, .. } => {
if variant.packed {
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0;
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
self.memory.mark_packed(ptr, variant.stride().bytes());
}
self.assign_fields(dest, dest_ty, operands)?;
@ -499,7 +523,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
.to_u128_unchecked();
let discr_size = discr.size().bytes();
if variants[variant].packed {
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0;
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
self.memory.mark_packed(ptr, variants[variant].stride().bytes());
}
@ -541,7 +565,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield, .. } => {
if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
if nonnull.packed {
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0;
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
self.memory.mark_packed(ptr, nonnull.stride().bytes());
}
if nndiscr == variant as u64 {
@ -554,7 +578,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr();
let dest = self.force_allocation(dest)?.to_ptr()?;
let dest = dest.offset(offset.bytes(), self.memory.layout)?;
let dest_size = self.type_size(ty)?
@ -613,7 +637,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let value = self.eval_operand(operand)?;
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr();
let dest = PrimVal::Ptr(self.force_allocation(dest)?.to_ptr()?);
for i in 0..length {
let elem_dest = dest.offset(i * elem_size, self.memory.layout)?;
@ -630,8 +654,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ref(_, _, ref lvalue) => {
let src = self.eval_lvalue(lvalue)?;
let (raw_ptr, extra) = self.force_allocation(src)?.to_ptr_and_extra();
let ptr = PrimVal::Ptr(raw_ptr);
let (ptr, extra) = self.force_allocation(src)?.to_ptr_and_extra();
let val = match extra {
LvalueExtra::None => Value::ByVal(ptr),
@ -726,7 +749,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Discriminant(ref lvalue) => {
let lval = self.eval_lvalue(lvalue)?;
let ty = self.lvalue_ty(lvalue);
let ptr = self.force_allocation(lval)?.to_ptr();
let ptr = self.force_allocation(lval)?.to_ptr()?;
let discr_val = self.read_discriminant_value(ptr, ty)?;
if let ty::TyAdt(adt_def, _) = ty.sty {
if adt_def.discriminants(self.tcx).all(|v| discr_val != v.to_u128_unchecked()) {
@ -856,23 +879,27 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
pub(super) fn wrapping_pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
pub(super) fn wrapping_pointer_offset(&self, ptr: PrimVal, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, PrimVal> {
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
let offset = offset.overflowing_mul(pointee_size).0;
Ok(ptr.wrapping_signed_offset(offset, self.memory.layout))
ptr.wrapping_signed_offset(offset, self.memory.layout)
}
pub(super) fn pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
pub(super) fn pointer_offset(&self, ptr: PrimVal, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, PrimVal> {
if offset == 0 {
// rustc relies on Offset-by-0 to be well-defined even for "bad" pointers like Unique::empty().
return Ok(ptr);
}
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
if pointee_size == 0 {
// rustc relies on offsetting pointers to zsts to be a nop
return Ok(ptr);
}
return if let Some(offset) = offset.checked_mul(pointee_size) {
let ptr = ptr.signed_offset(offset, self.memory.layout)?;
self.memory.check_bounds(ptr, false)?;
self.memory.check_bounds(ptr.to_ptr()?, false)?;
Ok(ptr)
} else {
Err(EvalError::OverflowingMath)
@ -920,7 +947,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
}
fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
fn copy(&mut self, src: PrimVal, dest: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
let size = self.type_size(ty)?.expect("cannot copy from an unsized type");
let align = self.type_align(ty)?;
self.memory.copy(src, dest, size, align)?;
@ -946,7 +973,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let substs = self.stack[frame].instance.substs;
let ptr = self.alloc_ptr_with_substs(ty, substs)?;
self.stack[frame].locals[local.index() - 1] = Some(Value::ByRef(ptr)); // it stays live
self.write_value_to_ptr(val, ptr, ty)?;
self.write_value_to_ptr(val, PrimVal::Ptr(ptr), ty)?;
let lval = Lvalue::from_ptr(ptr);
if let Some((field, field_ty)) = field {
self.lvalue_field(lval, field, ty, field_ty)?
@ -964,7 +991,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
_ => {
let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.instance.substs)?;
self.memory.mark_static(ptr.alloc_id);
self.write_value_to_ptr(global_val.value, ptr, global_val.ty)?;
self.write_value_to_ptr(global_val.value, PrimVal::Ptr(ptr), global_val.ty)?;
// see comment on `initialized` field
if global_val.initialized {
self.memory.mark_static_initalized(ptr.alloc_id, global_val.mutable)?;
@ -1067,7 +1094,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
//
// Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
// knew for certain that there were no outstanding pointers to this allocation.
self.write_value_to_ptr(src_val, dest_ptr, dest_ty)?;
self.write_value_to_ptr(src_val, PrimVal::Ptr(dest_ptr), dest_ty)?;
} else if let Value::ByRef(src_ptr) = src_val {
// If the value is not `ByRef`, then we know there are no pointers to it
@ -1085,7 +1112,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
write_dest(self, src_val)?;
} else {
let dest_ptr = self.alloc_ptr(dest_ty)?;
self.copy(src_ptr, dest_ptr, dest_ty)?;
self.copy(PrimVal::Ptr(src_ptr), PrimVal::Ptr(dest_ptr), dest_ty)?;
write_dest(self, Value::ByRef(dest_ptr))?;
}
@ -1100,16 +1127,16 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub(super) fn write_value_to_ptr(
&mut self,
value: Value,
dest: Pointer,
dest: PrimVal,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
match value {
Value::ByRef(ptr) => self.copy(ptr, dest, dest_ty),
Value::ByRef(ptr) => self.copy(PrimVal::Ptr(ptr), dest, dest_ty),
Value::ByVal(primval) => {
let size = self.type_size(dest_ty)?.expect("dest type must be sized");
self.memory.write_primval(dest, primval, size)
}
Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest, dest_ty),
Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty),
}
}
@ -1130,8 +1157,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let field_1_ty = self.get_field_ty(ty, 1)?;
let field_0_size = self.type_size(field_0_ty)?.expect("pair element type must be sized");
let field_1_size = self.type_size(field_1_ty)?.expect("pair element type must be sized");
self.memory.write_primval(ptr.offset(field_0, self.memory.layout)?, a, field_0_size)?;
self.memory.write_primval(ptr.offset(field_1, self.memory.layout)?, b, field_1_size)?;
self.memory.write_primval(PrimVal::Ptr(ptr.offset(field_0, self.memory.layout)?), a, field_0_size)?;
self.memory.write_primval(PrimVal::Ptr(ptr.offset(field_1, self.memory.layout)?), b, field_1_size)?;
Ok(())
}
@ -1242,20 +1269,20 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
fn read_ptr(&mut self, ptr: Pointer, pointee_ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
pub(crate) fn read_ptr(&self, ptr: Pointer, pointee_ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
let p = self.memory.read_ptr(ptr)?;
if self.type_is_sized(pointee_ty) {
Ok(Value::ByVal(PrimVal::Ptr(p)))
Ok(Value::ByVal(p))
} else {
trace!("reading fat pointer extra of type {}", pointee_ty);
let extra = ptr.offset(self.memory.pointer_size(), self.memory.layout)?;
let extra = match self.tcx.struct_tail(pointee_ty).sty {
ty::TyDynamic(..) => PrimVal::Ptr(self.memory.read_ptr(extra)?),
ty::TyDynamic(..) => self.memory.read_ptr(extra)?,
ty::TySlice(..) |
ty::TyStr => PrimVal::from_u128(self.memory.read_usize(extra)? as u128),
_ => bug!("unsized primval ptr read from {:?}", pointee_ty),
};
Ok(Value::ByValPair(PrimVal::Ptr(p), extra))
Ok(Value::ByValPair(p, extra))
}
}
@ -1301,7 +1328,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ty::TyFloat(FloatTy::F32) => PrimVal::from_f32(self.memory.read_f32(ptr)?),
ty::TyFloat(FloatTy::F64) => PrimVal::from_f64(self.memory.read_f64(ptr)?),
ty::TyFnPtr(_) => self.memory.read_ptr(ptr).map(PrimVal::Ptr)?,
ty::TyFnPtr(_) => self.memory.read_ptr(ptr)?,
ty::TyRef(_, ref tam) |
ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some),
@ -1360,7 +1387,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
(&ty::TyArray(_, length), &ty::TySlice(_)) => {
let ptr = src.read_ptr(&self.memory)?;
let len = PrimVal::from_u128(length as u128);
let ptr = PrimVal::Ptr(ptr);
self.write_value(Value::ByValPair(ptr, len), dest, dest_ty)
}
(&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
@ -1374,7 +1400,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let trait_ref = self.tcx.erase_regions(&trait_ref);
let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
let ptr = src.read_ptr(&self.memory)?;
let ptr = PrimVal::Ptr(ptr);
let extra = PrimVal::Ptr(vtable);
self.write_value(Value::ByValPair(ptr, extra), dest, dest_ty)
},
@ -1423,7 +1448,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
};
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr();
let dest = self.force_allocation(dest)?.to_ptr()?;
let iter = src_fields.zip(dst_fields).enumerate();
for (i, (src_f, dst_f)) in iter {
let src_fty = monomorphize_field_ty(self.tcx, src_f, substs_a);
@ -1436,7 +1461,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let src_f_ptr = src_ptr.offset(src_field_offset, self.memory.layout)?;
let dst_f_ptr = dest.offset(dst_field_offset, self.memory.layout)?;
if src_fty == dst_fty {
self.copy(src_f_ptr, dst_f_ptr, src_fty)?;
self.copy(PrimVal::Ptr(src_f_ptr), PrimVal::Ptr(dst_f_ptr), src_fty)?;
} else {
self.unsize_into(Value::ByRef(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
}
@ -1633,7 +1658,7 @@ pub fn eval_main<'a, 'tcx: 'a>(
start_mir.span,
start_mir,
Lvalue::from_ptr(ret_ptr),
StackPopCleanup::None,
StackPopCleanup::Tls(None),
)?;
let mut args = ecx.frame().mir.args_iter();
@ -1659,8 +1684,8 @@ pub fn eval_main<'a, 'tcx: 'a>(
main_instance,
main_mir.span,
main_mir,
Lvalue::from_ptr(Pointer::zst_ptr()),
StackPopCleanup::None,
Lvalue::zst(),
StackPopCleanup::Tls(None),
)?;
}

View File

@ -12,7 +12,10 @@ use value::{PrimVal, Value};
pub enum Lvalue<'tcx> {
/// An lvalue referring to a value allocated in the `Memory` system.
Ptr {
ptr: Pointer,
/// An lvalue may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
ptr: PrimVal,
extra: LvalueExtra,
},
@ -61,11 +64,24 @@ pub struct Global<'tcx> {
}
impl<'tcx> Lvalue<'tcx> {
pub fn from_ptr(ptr: Pointer) -> Self {
/// Produces an Lvalue that will error if attempted to be read from
pub fn undef() -> Self {
Self::from_primval_ptr(PrimVal::Undef)
}
fn from_primval_ptr(ptr: PrimVal) -> Self {
Lvalue::Ptr { ptr, extra: LvalueExtra::None }
}
pub(super) fn to_ptr_and_extra(self) -> (Pointer, LvalueExtra) {
pub fn zst() -> Self {
Self::from_ptr(Pointer::zst_ptr())
}
pub fn from_ptr(ptr: Pointer) -> Self {
Self::from_primval_ptr(PrimVal::Ptr(ptr))
}
pub(super) fn to_ptr_and_extra(self) -> (PrimVal, LvalueExtra) {
match self {
Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
@ -73,10 +89,10 @@ impl<'tcx> Lvalue<'tcx> {
}
}
pub(super) fn to_ptr(self) -> Pointer {
pub(super) fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
let (ptr, extra) = self.to_ptr_and_extra();
assert_eq!(extra, LvalueExtra::None);
ptr
ptr.to_ptr()
}
pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
@ -127,7 +143,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
match lvalue {
Lvalue::Ptr { ptr, extra } => {
assert_eq!(extra, LvalueExtra::None);
Ok(Value::ByRef(ptr))
Ok(Value::ByRef(ptr.to_ptr()?))
}
Lvalue::Local { frame, local, field } => {
self.stack[frame].get_local(local, field.map(|(i, _)| i))
@ -167,7 +183,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
field_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Lvalue<'tcx>> {
let base_layout = self.type_layout(base_ty)?;
use rustc::ty::layout::Layout::*;
let (offset, packed) = match *base_layout {
Univariant { ref variant, .. } => {
@ -229,19 +244,24 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Lvalue::Local { frame, local, field } => match self.stack[frame].get_local(local, field.map(|(i, _)| i))? {
Value::ByRef(ptr) => {
assert!(field.is_none(), "local can't be ByRef and have a field offset");
(ptr, LvalueExtra::None)
(PrimVal::Ptr(ptr), LvalueExtra::None)
},
Value::ByVal(PrimVal::Undef) => {
// FIXME: allocate in fewer cases
if self.ty_to_primval_kind(base_ty).is_ok() {
return Ok(base);
} else {
(self.force_allocation(base)?.to_ptr(), LvalueExtra::None)
(PrimVal::Ptr(self.force_allocation(base)?.to_ptr()?), LvalueExtra::None)
}
},
Value::ByVal(_) => {
assert_eq!(field_index, 0, "ByVal can only have 1 non zst field with offset 0");
return Ok(base);
if self.get_field_count(base_ty)? == 1 {
assert_eq!(field_index, 0, "ByVal can only have 1 non zst field with offset 0");
return Ok(base);
}
// this branch is taken when a union creates a large ByVal which is then
// accessed as a struct with multiple small fields
(PrimVal::Ptr(self.force_allocation(base)?.to_ptr()?), LvalueExtra::None)
},
Value::ByValPair(_, _) => {
let field_count = self.get_field_count(base_ty)?;
@ -264,7 +284,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let offset = match base_extra {
LvalueExtra::Vtable(tab) => {
let (_, align) = self.size_and_align_of_dst(base_ty, Value::ByValPair(PrimVal::Ptr(base_ptr), PrimVal::Ptr(tab)))?;
let (_, align) = self.size_and_align_of_dst(base_ty, Value::ByValPair(base_ptr, PrimVal::Ptr(tab)))?;
offset.abi_align(Align::from_bytes(align, align).unwrap()).bytes()
}
_ => offset.bytes(),
@ -276,7 +296,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
if packed {
let size = self.type_size(field_ty)?.expect("packed struct must be sized");
self.memory.mark_packed(ptr, size);
self.memory.mark_packed(ptr.to_ptr()?, size);
}
let extra = if self.type_is_sized(field_ty) {

View File

@ -6,7 +6,7 @@ use rustc::ty;
use rustc::ty::layout::{self, TargetDataLayout};
use error::{EvalError, EvalResult};
use value::PrimVal;
use value::{PrimVal, self};
////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
@ -61,70 +61,31 @@ impl Pointer {
}
pub fn wrapping_signed_offset<'tcx>(self, i: i64, layout: &TargetDataLayout) -> Self {
Pointer::new(self.alloc_id, (self.offset.wrapping_add(i as u64) as u128 % (1u128 << layout.pointer_size.bits())) as u64)
Pointer::new(self.alloc_id, value::wrapping_signed_offset(self.offset, i, layout))
}
pub fn signed_offset<'tcx>(self, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// trickery to ensure that i64::min_value() works fine
// this formula only works for true negative values, it panics for zero!
let n = u64::max_value() - (i as u64) + 1;
if let Some(res) = self.offset.checked_sub(n) {
Ok(Pointer::new(self.alloc_id, res))
} else {
Err(EvalError::OverflowingMath)
}
} else {
self.offset(i as u64, layout)
}
Ok(Pointer::new(self.alloc_id, value::signed_offset(self.offset, i, layout)?))
}
pub fn offset<'tcx>(self, i: u64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
if let Some(res) = self.offset.checked_add(i) {
if res as u128 >= (1u128 << layout.pointer_size.bits()) {
Err(EvalError::OverflowingMath)
} else {
Ok(Pointer::new(self.alloc_id, res))
}
} else {
Err(EvalError::OverflowingMath)
}
Ok(Pointer::new(self.alloc_id, value::offset(self.offset, i, layout)?))
}
pub fn points_to_zst(&self) -> bool {
self.alloc_id == ZST_ALLOC_ID
}
pub fn to_int<'tcx>(&self) -> EvalResult<'tcx, u64> {
match self.alloc_id {
NEVER_ALLOC_ID => Ok(self.offset),
_ => Err(EvalError::ReadPointerAsBytes),
}
}
pub fn from_int(i: u64) -> Self {
Pointer::new(NEVER_ALLOC_ID, i)
}
pub fn zst_ptr() -> Self {
Pointer::new(ZST_ALLOC_ID, 0)
}
pub fn never_ptr() -> Self {
Pointer::new(NEVER_ALLOC_ID, 0)
}
pub fn is_null_ptr(&self) -> bool {
return *self == Pointer::from_int(0)
}
}
pub type TlsKey = usize;
#[derive(Copy, Clone, Debug)]
pub struct TlsEntry<'tcx> {
data: Pointer, // Will eventually become a map from thread IDs to pointers, if we ever support more than one thread.
data: PrimVal, // Will eventually become a map from thread IDs to `PrimVal`s, if we ever support more than one thread.
dtor: Option<ty::Instance<'tcx>>,
}
@ -180,14 +141,13 @@ pub struct Memory<'a, 'tcx> {
literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
/// pthreads-style thread-local storage.
thread_local: HashMap<TlsKey, TlsEntry<'tcx>>,
thread_local: BTreeMap<TlsKey, TlsEntry<'tcx>>,
/// The Key to use for the next thread-local allocation.
next_thread_local: TlsKey,
}
const ZST_ALLOC_ID: AllocId = AllocId(0);
const NEVER_ALLOC_ID: AllocId = AllocId(1);
impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
@ -202,7 +162,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
packed: BTreeSet::new(),
static_alloc: HashSet::new(),
literal_alloc_cache: HashMap::new(),
thread_local: HashMap::new(),
thread_local: BTreeMap::new(),
next_thread_local: 0,
}
}
@ -395,7 +355,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
let new_key = self.next_thread_local;
self.next_thread_local += 1;
self.thread_local.insert(new_key, TlsEntry { data: Pointer::from_int(0), dtor });
self.thread_local.insert(new_key, TlsEntry { data: PrimVal::Bytes(0), dtor });
trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
return new_key;
}
@ -410,7 +370,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
}
pub(crate) fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> {
pub(crate) fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, PrimVal> {
return match self.thread_local.get(&key) {
Some(&TlsEntry { data, .. }) => {
trace!("TLS key {} loaded: {:?}", key, data);
@ -420,7 +380,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
}
pub(crate) fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> {
pub(crate) fn store_tls(&mut self, key: TlsKey, new_data: PrimVal) -> EvalResult<'tcx> {
return match self.thread_local.get_mut(&key) {
Some(&mut TlsEntry { ref mut data, .. }) => {
trace!("TLS key {} stored: {:?}", key, new_data);
@ -431,14 +391,36 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
}
// Returns a dtor and its argument, if one is supposed to run
pub(crate) fn fetch_tls_dtor(&mut self) -> Option<(ty::Instance<'tcx>, Pointer)> {
for (_, &mut TlsEntry { ref mut data, dtor }) in self.thread_local.iter_mut() {
if !data.is_null_ptr() {
/// Returns a dtor, its argument and its index, if one is supposed to run
///
/// An optional destructor function may be associated with each key value.
/// At thread exit, if a key value has a non-NULL destructor pointer,
/// and the thread has a non-NULL value associated with that key,
/// the value of the key is set to NULL, and then the function pointed
/// to is called with the previously associated value as its sole argument.
/// The order of destructor calls is unspecified if more than one destructor
/// exists for a thread when it exits.
///
/// If, after all the destructors have been called for all non-NULL values
/// with associated destructors, there are still some non-NULL values with
/// associated destructors, then the process is repeated.
/// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
/// calls for outstanding non-NULL values, there are still some non-NULL values
/// with associated destructors, implementations may stop calling destructors,
/// or they may continue calling destructors until no non-NULL values with
/// associated destructors exist, even though this might result in an infinite loop.
pub(crate) fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> Option<(ty::Instance<'tcx>, PrimVal, TlsKey)> {
use std::collections::Bound::*;
let start = match key {
Some(key) => Excluded(key),
None => Unbounded,
};
for (&key, &mut TlsEntry { ref mut data, dtor }) in self.thread_local.range_mut((start, Unbounded)) {
if *data != PrimVal::Bytes(0) {
if let Some(dtor) = dtor {
let old_data = *data;
*data = Pointer::from_int(0);
return Some((dtor, old_data));
let ret = Some((dtor, *data, key));
*data = PrimVal::Bytes(0);
return ret;
}
}
}
@ -467,7 +449,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Some(alloc) => Ok(alloc),
None => match self.functions.get(&id) {
Some(_) => Err(EvalError::DerefFunctionPointer),
None if id == NEVER_ALLOC_ID || id == ZST_ALLOC_ID => Err(EvalError::InvalidMemoryAccess),
None if id == ZST_ALLOC_ID => Err(EvalError::InvalidMemoryAccess),
None => Err(EvalError::DanglingPointerDeref),
}
}
@ -482,7 +464,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
},
None => match self.functions.get(&id) {
Some(_) => Err(EvalError::DerefFunctionPointer),
None if id == NEVER_ALLOC_ID || id == ZST_ALLOC_ID => Err(EvalError::InvalidMemoryAccess),
None if id == ZST_ALLOC_ID => Err(EvalError::InvalidMemoryAccess),
None => Err(EvalError::DanglingPointerDeref),
}
}
@ -513,7 +495,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
let mut allocs_seen = HashSet::new();
while let Some(id) = allocs_to_print.pop_front() {
if id == ZST_ALLOC_ID || id == NEVER_ALLOC_ID { continue; }
if id == ZST_ALLOC_ID { continue; }
let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
let prefix_len = msg.len();
let mut relocations = vec![];
@ -563,7 +545,6 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
let target = match target_id {
ZST_ALLOC_ID => String::from("zst"),
NEVER_ALLOC_ID => String::from("int ptr"),
_ => format!("({})", target_id),
};
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
@ -622,9 +603,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
fn get_bytes(&self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
return Ok(&[]);
}
assert_ne!(size, 0);
if self.relocations(ptr, size)?.count() != 0 {
return Err(EvalError::ReadPointerAsBytes);
}
@ -633,11 +612,9 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
fn get_bytes_mut(&mut self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
if size == 0 {
return Ok(&mut []);
}
assert_ne!(size, 0);
self.clear_relocations(ptr, size)?;
self.mark_definedness(ptr, size, true)?;
self.mark_definedness(PrimVal::Ptr(ptr), size, true)?;
self.get_bytes_unchecked_mut(ptr, size, align)
}
}
@ -647,7 +624,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
/// mark an allocation as being the entry point to a static (see `static_alloc` field)
pub fn mark_static(&mut self, alloc_id: AllocId) {
trace!("mark_static: {:?}", alloc_id);
if alloc_id != NEVER_ALLOC_ID && alloc_id != ZST_ALLOC_ID && !self.static_alloc.insert(alloc_id) {
if alloc_id != ZST_ALLOC_ID && !self.static_alloc.insert(alloc_id) {
bug!("tried to mark an allocation ({:?}) as static twice", alloc_id);
}
}
@ -677,7 +654,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
// mark recursively
mem::replace(relocations, Default::default())
},
None if alloc_id == NEVER_ALLOC_ID || alloc_id == ZST_ALLOC_ID => return Ok(()),
None if alloc_id == ZST_ALLOC_ID => return Ok(()),
None if !self.functions.contains_key(&alloc_id) => return Err(EvalError::DanglingPointerDeref),
_ => return Ok(()),
};
@ -690,10 +667,12 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64) -> EvalResult<'tcx> {
pub fn copy(&mut self, src: PrimVal, dest: PrimVal, size: u64, align: u64) -> EvalResult<'tcx> {
if size == 0 {
return Ok(());
}
let src = src.to_ptr()?;
let dest = dest.to_ptr()?;
self.check_relocation_edges(src, size)?;
let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
@ -733,25 +712,36 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
}
pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
self.get_bytes(ptr, size, 1)
pub fn read_bytes(&self, ptr: PrimVal, size: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, 1)
}
pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
if src.is_empty() {
return Ok(());
}
let bytes = self.get_bytes_mut(ptr, src.len() as u64, 1)?;
bytes.clone_from_slice(src);
Ok(())
}
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
if count == 0 {
return Ok(());
}
let bytes = self.get_bytes_mut(ptr, count, 1)?;
for b in bytes { *b = val; }
Ok(())
}
pub fn read_ptr(&self, ptr: Pointer) -> EvalResult<'tcx, Pointer> {
pub fn read_ptr(&self, ptr: Pointer) -> EvalResult<'tcx, PrimVal> {
let size = self.pointer_size();
self.check_defined(ptr, size)?;
if self.check_defined(ptr, size).is_err() {
return Ok(PrimVal::Undef);
}
let endianess = self.endianess();
let bytes = self.get_bytes_unchecked(ptr, size, size)?;
let offset = read_target_uint(endianess, bytes).unwrap();
@ -759,29 +749,27 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
let offset = offset as u64;
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
Some(&alloc_id) => Ok(Pointer::new(alloc_id, offset)),
None => Ok(Pointer::from_int(offset)),
Some(&alloc_id) => Ok(PrimVal::Ptr(Pointer::new(alloc_id, offset))),
None => Ok(PrimVal::Bytes(offset as u128)),
}
}
pub fn write_ptr(&mut self, dest: Pointer, ptr: Pointer) -> EvalResult<'tcx> {
self.write_usize(dest, ptr.offset as u64)?;
if ptr.alloc_id != NEVER_ALLOC_ID {
self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id);
}
self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id);
Ok(())
}
pub fn write_primval(
&mut self,
dest: Pointer,
dest: PrimVal,
val: PrimVal,
size: u64,
) -> EvalResult<'tcx> {
match val {
PrimVal::Ptr(ptr) => {
assert_eq!(size, self.pointer_size());
self.write_ptr(dest, ptr)
self.write_ptr(dest.to_ptr()?, ptr)
}
PrimVal::Bytes(bytes) => {
@ -795,7 +783,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
16 => !0,
_ => bug!("unexpected PrimVal::Bytes size"),
};
self.write_uint(dest, bytes & mask, size)
self.write_uint(dest.to_ptr()?, bytes & mask, size)
}
PrimVal::Undef => self.mark_definedness(dest, size, false),
@ -981,13 +969,14 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn mark_definedness(
&mut self,
ptr: Pointer,
ptr: PrimVal,
size: u64,
new_state: bool
) -> EvalResult<'tcx> {
if size == 0 {
return Ok(())
}
let ptr = ptr.to_ptr()?;
let mut alloc = self.get_mut(ptr.alloc_id)?;
alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
Ok(())

View File

@ -4,7 +4,6 @@ use rustc::ty::{self, Ty};
use error::{EvalError, EvalResult};
use eval_context::EvalContext;
use lvalue::Lvalue;
use memory::Pointer;
use value::{
PrimVal,
PrimValKind,
@ -73,6 +72,7 @@ macro_rules! int_arithmetic {
($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
let l = $l;
let r = $r;
use value::PrimValKind::*;
match $kind {
I8 => overflow!($int_op, l as i8, r as i8),
I16 => overflow!($int_op, l as i16, r as i16),
@ -143,18 +143,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
use rustc::mir::BinOp::*;
use value::PrimValKind::*;
// FIXME(solson): Temporary hack. It will go away when we get rid of Pointer's ability to store
// plain bytes, and leave that to PrimVal::Bytes.
fn normalize(val: PrimVal) -> PrimVal {
if let PrimVal::Ptr(ptr) = val {
if let Ok(bytes) = ptr.to_int() {
return PrimVal::Bytes(bytes as u128);
}
}
val
}
let (left, right) = (normalize(left), normalize(right));
let left_kind = self.ty_to_primval_kind(left_ty)?;
let right_kind = self.ty_to_primval_kind(right_ty)?;
@ -162,29 +150,43 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
if bin_op == Offset {
if left_kind == Ptr && right_kind == PrimValKind::from_uint_size(self.memory.pointer_size()) {
let pointee_ty = left_ty.builtin_deref(true, ty::LvaluePreference::NoPreference).expect("Offset called on non-ptr type").ty;
let ptr = self.pointer_offset(left.to_ptr()?, pointee_ty, right.to_bytes()? as i64)?;
return Ok((PrimVal::Ptr(ptr), false));
let ptr = self.pointer_offset(left, pointee_ty, right.to_bytes()? as i64)?;
return Ok((ptr, false));
} else {
bug!("Offset used with wrong type");
}
}
let (l, r) = match (left, right) {
(PrimVal::Bytes(left_bytes), PrimVal::Bytes(right_bytes)) => (left_bytes, right_bytes),
// One argument is a pointer value -- this is handled separately
(PrimVal::Ptr(left_ptr), PrimVal::Ptr(right_ptr)) => {
return self.ptr_ops(bin_op, left_ptr, left_kind, right_ptr, right_kind);
}
(PrimVal::Ptr(ptr), PrimVal::Bytes(bytes)) => {
return self.ptr_ops(bin_op, ptr, left_kind, Pointer::from_int(bytes as u64), right_kind);
}
(PrimVal::Bytes(bytes), PrimVal::Ptr(ptr)) => {
return self.ptr_ops(bin_op, Pointer::from_int(bytes as u64), left_kind, ptr, right_kind);
}
(PrimVal::Undef, _) | (_, PrimVal::Undef) => return Err(EvalError::ReadUndefBytes),
// unrelated pointer ops
let op: Option<fn(&PrimVal, &PrimVal) -> bool> = match bin_op {
Eq => Some(PrimVal::eq),
Ne => Some(PrimVal::ne),
_ => None,
};
if let Some(op) = op {
// only floats can't be binary compared
let ok = left_kind != F32 && left_kind != F64;
let ok = ok && right_kind != F32 && right_kind != F64;
if ok {
return Ok((PrimVal::from_bool(op(&left, &right)), false));
}
}
if let (Ok(left), Ok(right)) = (left.to_ptr(), right.to_ptr()) {
if left.alloc_id == right.alloc_id {
return self.ptr_ops(
bin_op,
left.offset,
right.offset,
);
} else {
return Err(EvalError::InvalidPointerMath);
}
}
let l = left.to_bytes()?;
let r = right.to_bytes()?;
// These ops can have an RHS with a different numeric type.
if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) {
@ -260,41 +262,26 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
fn ptr_ops(
&self,
bin_op: mir::BinOp,
left: Pointer,
left_kind: PrimValKind,
right: Pointer,
right_kind: PrimValKind,
left: u64,
right: u64,
) -> EvalResult<'tcx, (PrimVal, bool)> {
use rustc::mir::BinOp::*;
use value::PrimValKind::*;
if left_kind != right_kind || !(left_kind.is_ptr() || left_kind == PrimValKind::from_uint_size(self.memory.pointer_size())) {
let msg = format!("unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
return Err(EvalError::Unimplemented(msg));
}
let val = match bin_op {
Eq => PrimVal::from_bool(left == right),
Ne => PrimVal::from_bool(left != right),
Lt | Le | Gt | Ge => {
if left.alloc_id == right.alloc_id {
PrimVal::from_bool(match bin_op {
Lt => left.offset < right.offset,
Le => left.offset <= right.offset,
Gt => left.offset > right.offset,
Ge => left.offset >= right.offset,
_ => bug!("We already established it has to be a comparison operator."),
})
} else {
return Err(EvalError::InvalidPointerMath);
}
PrimVal::from_bool(match bin_op {
Lt => left < right,
Le => left <= right,
Gt => left > right,
Ge => left >= right,
_ => bug!("We already established it has to be a comparison operator."),
})
}
Sub => {
if left.alloc_id == right.alloc_id {
return int_arithmetic!(left_kind, overflowing_sub, left.offset, right.offset);
} else {
return Err(EvalError::InvalidPointerMath);
}
let usize = PrimValKind::from_uint_size(self.memory.pointer_size());
return int_arithmetic!(usize, overflowing_sub, left, right);
}
_ => {
return Err(EvalError::ReadPointerAsBytes);

View File

@ -14,7 +14,6 @@ use error::{EvalResult, EvalError};
use eval_context::{EvalContext, StackPopCleanup};
use lvalue::{Global, GlobalId, Lvalue};
use value::{Value, PrimVal};
use memory::Pointer;
use syntax::codemap::Span;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
@ -33,23 +32,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
self.memory.clear_packed();
self.inc_step_counter_and_check_limit(1)?;
if self.stack.is_empty() {
if let Some((instance, ptr)) = self.memory.fetch_tls_dtor() {
trace!("Running TLS dtor {:?} on {:?}", instance, ptr);
// TODO: Potientially, this has to support all the other possible instances? See eval_fn_call in terminator/mod.rs
let mir = self.load_mir(instance.def)?;
self.push_stack_frame(
instance,
mir.span,
mir,
Lvalue::from_ptr(Pointer::zst_ptr()),
StackPopCleanup::None,
)?;
let arg_local = self.frame().mir.args_iter().next().ok_or(EvalError::AbiViolation("TLS dtor does not take enough arguments.".to_owned()))?;
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
self.write_value(Value::ByVal(PrimVal::Ptr(ptr)), dest, ty)?;
return Ok(true);
}
return Ok(false);
}
@ -141,6 +123,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
self.deallocate_local(old_val)?;
}
// Just a borrowck thing
EndRegion(..) => {}
// Defined to do nothing. These are added by optimization passes, to avoid changing the
@ -191,7 +174,7 @@ impl<'a, 'b, 'tcx> ConstantExtractor<'a, 'b, 'tcx> {
}
if self.ecx.tcx.has_attr(def_id, "linkage") {
trace!("Initializing an extern global with NULL");
self.ecx.globals.insert(cid, Global::initialized(self.ecx.tcx.type_of(def_id), Value::ByVal(PrimVal::Ptr(Pointer::from_int(0))), !shared));
self.ecx.globals.insert(cid, Global::initialized(self.ecx.tcx.type_of(def_id), Value::ByVal(PrimVal::Bytes(0)), !shared));
return;
}
self.try(|this| {

View File

@ -5,7 +5,6 @@ use syntax::codemap::Span;
use error::EvalResult;
use eval_context::{EvalContext, StackPopCleanup};
use lvalue::{Lvalue, LvalueExtra};
use memory::Pointer;
use value::PrimVal;
use value::Value;
@ -13,9 +12,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub(crate) fn drop_lvalue(&mut self, lval: Lvalue<'tcx>, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
trace!("drop_lvalue: {:#?}", lval);
let val = match self.force_allocation(lval)? {
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Ptr(vtable)),
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len as u128)),
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => Value::ByVal(PrimVal::Ptr(ptr)),
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => Value::ByValPair(ptr, PrimVal::Ptr(vtable)),
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => Value::ByValPair(ptr, PrimVal::Bytes(len as u128)),
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => Value::ByVal(ptr),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span)
@ -50,7 +49,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
instance,
span,
mir,
Lvalue::from_ptr(Pointer::zst_ptr()),
Lvalue::zst(),
StackPopCleanup::None,
)?;

View File

@ -46,7 +46,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_primval(dest, PrimVal::Ptr(result_ptr), dest_ty)?;
self.write_primval(dest, result_ptr, dest_ty)?;
}
"assume" => {
@ -60,7 +60,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"atomic_load_acq" |
"volatile_load" => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
self.write_value(Value::ByRef(ptr), dest, ty)?;
}
@ -79,7 +79,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
let change = self.value_to_primval(arg_vals[1], ty)?;
let old = self.read_value(ptr, ty)?;
let old = match old {
@ -93,7 +93,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
let expect_old = self.value_to_primval(arg_vals[1], ty)?;
let change = self.value_to_primval(arg_vals[2], ty)?;
let old = self.read_value(ptr, ty)?;
@ -103,7 +103,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
};
let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
let dest = self.force_allocation(dest)?.to_ptr();
let dest = self.force_allocation(dest)?.to_ptr()?;
self.write_pair_to_ptr(old, val, dest, dest_ty)?;
self.write_primval(Lvalue::from_ptr(ptr), change, ty)?;
}
@ -114,7 +114,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" |
"atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
let change = self.value_to_primval(arg_vals[1], ty)?;
let old = self.read_value(ptr, ty)?;
let old = match old {
@ -143,11 +143,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// FIXME: check whether overlapping occurs
let elem_ty = substs.type_at(0);
let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
let elem_align = self.type_align(elem_ty)?;
let src = arg_vals[0].read_ptr(&self.memory)?;
let dest = arg_vals[1].read_ptr(&self.memory)?;
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
self.memory.copy(src, dest, count * elem_size, elem_align)?;
if elem_size != 0 {
let elem_align = self.type_align(elem_ty)?;
let src = arg_vals[0].read_ptr(&self.memory)?;
let dest = arg_vals[1].read_ptr(&self.memory)?;
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
self.memory.copy(src, dest, count * elem_size, elem_align)?;
}
}
"ctpop" |
@ -163,7 +165,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"discriminant_value" => {
let ty = substs.type_at(0);
let adt_ptr = arg_vals[0].read_ptr(&self.memory)?;
let adt_ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
}
@ -259,7 +261,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
};
match dest {
Lvalue::Local { frame, local, field } => self.modify_local(frame, local, field.map(|(i, _)| i), init)?,
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?,
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => self.memory.write_repeat(ptr.to_ptr()?, 0, size)?,
Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat ptr target"),
Lvalue::Global(cid) => self.modify_global(cid, init)?,
}
@ -297,7 +299,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_primval(dest, PrimVal::Ptr(result_ptr), dest_ty)?;
self.write_primval(dest, result_ptr, dest_ty)?;
}
"overflowing_sub" => {
@ -388,9 +390,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let dest_align = self.type_align(dest_ty)?;
let size = self.type_size(dest_ty)?.expect("transmute() type must be sized");
if dest_align < src_align {
let ptr = self.force_allocation(dest)?.to_ptr();
let ptr = self.force_allocation(dest)?.to_ptr()?;
self.memory.mark_packed(ptr, size);
self.write_value_to_ptr(arg_vals[0], ptr, dest_ty)?;
self.write_value_to_ptr(arg_vals[0], PrimVal::Ptr(ptr), dest_ty)?;
} else {
self.write_value(arg_vals[0], dest, dest_ty)?;
}
@ -401,7 +403,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let uninit = |this: &mut Self, val: Value| {
match val {
Value::ByRef(ptr) => {
this.memory.mark_definedness(ptr, size, false)?;
this.memory.mark_definedness(PrimVal::Ptr(ptr), size, false)?;
Ok(Value::ByRef(ptr))
},
_ => Ok(Value::ByVal(PrimVal::Undef)),
@ -422,7 +424,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ty_align = self.type_align(ty)?;
let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
let size = self.type_size(ty)?.expect("write_bytes() type must be sized");
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
if count > 0 {
self.memory.check_align(ptr, ty_align, size * count)?;

View File

@ -388,7 +388,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ptr_size = self.memory.pointer_size();
let (_, vtable) = self.eval_operand(&arg_operands[0])?.expect_ptr_vtable_pair(&self.memory)?;
let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3), self.memory.layout)?)?;
let instance = self.memory.get_fn(fn_ptr.alloc_id)?;
let instance = self.memory.get_fn(fn_ptr.to_ptr()?.alloc_id)?;
let mut arg_operands = arg_operands.to_vec();
let ty = self.operand_ty(&arg_operands[0]);
let ty = self.get_field_ty(ty, 0)?;
@ -431,11 +431,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
};
let (return_lvalue, return_to_block) = match destination {
Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)),
None => {
// FIXME(solson)
let lvalue = Lvalue::from_ptr(Pointer::never_ptr());
(lvalue, StackPopCleanup::None)
}
None => (Lvalue::undef(), StackPopCleanup::None),
};
self.push_stack_frame(
@ -579,7 +575,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
"__rust_deallocate" => {
let ptr = args[0].read_ptr(&self.memory)?;
let ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
// FIXME: insert sanity check for size and align?
let _old_size = self.value_to_primval(args[1], usize)?.to_u64()?;
let _align = self.value_to_primval(args[2], usize)?.to_u64()?;
@ -587,7 +583,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
},
"__rust_reallocate" => {
let ptr = args[0].read_ptr(&self.memory)?;
let ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
let size = self.value_to_primval(args[2], usize)?.to_u64()?;
let align = self.value_to_primval(args[3], usize)?.to_u64()?;
let new_ptr = self.memory.reallocate(ptr, size, align)?;
@ -598,7 +594,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure
let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
let f = args[0].read_ptr(&self.memory)?;
let f = args[0].read_ptr(&self.memory)?.to_ptr()?;
let data = args[1].read_ptr(&self.memory)?;
let f_instance = self.memory.get_fn(f.alloc_id)?;
self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?;
@ -610,13 +606,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
f_instance,
mir.span,
mir,
Lvalue::from_ptr(Pointer::zst_ptr()),
Lvalue::zst(),
StackPopCleanup::Goto(dest_block),
)?;
let arg_local = self.frame().mir.args_iter().next().ok_or(EvalError::AbiViolation("Argument to __rust_maybe_catch_panic does not take enough arguments.".to_owned()))?;
let arg_dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
self.write_value(Value::ByVal(PrimVal::Ptr(data)), arg_dest, u8_ptr_ty)?;
self.write_primval(arg_dest, data, u8_ptr_ty)?;
// We ourselbes return 0
self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?;
@ -655,7 +651,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let num = self.value_to_primval(args[2], usize)?.to_u64()?;
if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position(|&c| c == val) {
let new_ptr = ptr.offset(num - idx as u64 - 1, self.memory.layout)?;
self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?;
self.write_primval(dest, new_ptr, dest_ty)?;
} else {
self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?;
}
@ -667,7 +663,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let num = self.value_to_primval(args[2], usize)?.to_u64()?;
if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position(|&c| c == val) {
let new_ptr = ptr.offset(idx as u64, self.memory.layout)?;
self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?;
self.write_primval(dest, new_ptr, dest_ty)?;
} else {
self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?;
}
@ -675,7 +671,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"getenv" => {
{
let name_ptr = args[0].read_ptr(&self.memory)?;
let name_ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
let name = self.memory.read_c_str(name_ptr)?;
info!("ignored env var request for `{:?}`", ::std::str::from_utf8(name));
}
@ -718,7 +714,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"mmap" => {
// This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
let addr = args[0].read_ptr(&self.memory)?;
self.write_primval(dest, PrimVal::Ptr(addr), dest_ty)?;
self.write_primval(dest, addr, dest_ty)?;
}
// Hook pthread calls that go to the thread-local storage memory subsystem
@ -726,8 +722,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let key_ptr = args[0].read_ptr(&self.memory)?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
let dtor_ptr = args[1].read_ptr(&self.memory)?;
let dtor = if dtor_ptr.is_null_ptr() { None } else { Some(self.memory.get_fn(dtor_ptr.alloc_id)?) };
let dtor = match args[1].read_ptr(&self.memory)? {
PrimVal::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr.alloc_id)?),
PrimVal::Bytes(0) => None,
PrimVal::Bytes(_) => return Err(EvalError::ReadBytesAsPointer),
PrimVal::Undef => return Err(EvalError::ReadUndefBytes),
};
// Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
let key_type = self.operand_ty(&arg_operands[0]).builtin_deref(true, ty::LvaluePreference::NoPreference)
@ -743,7 +743,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
return Err(EvalError::OutOfTls);
}
// TODO: Does this need checking for alignment?
self.memory.write_uint(key_ptr, key, key_size.bytes())?;
self.memory.write_uint(key_ptr.to_ptr()?, key, key_size.bytes())?;
// Return success (0)
self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?;
@ -759,7 +759,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t
let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey;
let ptr = self.memory.load_tls(key)?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
self.write_primval(dest, ptr, dest_ty)?;
}
"pthread_setspecific" => {
// The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t

View File

@ -2,6 +2,7 @@ use rustc::traits::{self, Reveal};
use eval_context::EvalContext;
use memory::Pointer;
use value::{Value, PrimVal};
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
@ -9,7 +10,7 @@ use rustc::ty::{self, Ty};
use syntax::codemap::DUMMY_SP;
use syntax::ast;
use error::EvalResult;
use error::{EvalResult, EvalError};
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
@ -73,16 +74,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
pub fn read_drop_type_from_vtable(&self, vtable: Pointer) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
let drop_fn = self.memory.read_ptr(vtable)?;
// just a sanity check
assert_eq!(drop_fn.offset, 0);
// some values don't need to call a drop impl, so the value is null
if drop_fn == Pointer::from_int(0) {
Ok(None)
} else {
self.memory.get_fn(drop_fn.alloc_id).map(Some)
// we don't care about the pointee type, we just want a pointer
match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null
Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn.alloc_id).map(Some),
_ => Err(EvalError::ReadBytesAsPointer),
}
}

View File

@ -2,6 +2,7 @@
#![allow(float_cmp)]
use std::mem::transmute;
use rustc::ty::layout::TargetDataLayout;
use error::{EvalError, EvalResult};
use memory::{Memory, Pointer};
@ -47,7 +48,7 @@ pub enum Value {
/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes
/// of a simple value, a pointer into another `Allocation`, or be undefined.
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PrimVal {
/// The raw bytes of a simple value.
Bytes(u128),
@ -74,33 +75,33 @@ pub enum PrimValKind {
}
impl<'a, 'tcx: 'a> Value {
pub(super) fn read_ptr(&self, mem: &Memory<'a, 'tcx>) -> EvalResult<'tcx, Pointer> {
pub(super) fn read_ptr(&self, mem: &Memory<'a, 'tcx>) -> EvalResult<'tcx, PrimVal> {
use self::Value::*;
match *self {
ByRef(ptr) => mem.read_ptr(ptr),
ByVal(ptr) | ByValPair(ptr, _) => ptr.to_ptr(),
ByVal(ptr) | ByValPair(ptr, _) => Ok(ptr),
}
}
pub(super) fn expect_ptr_vtable_pair(
&self,
mem: &Memory<'a, 'tcx>
) -> EvalResult<'tcx, (Pointer, Pointer)> {
) -> EvalResult<'tcx, (PrimVal, Pointer)> {
use self::Value::*;
match *self {
ByRef(ref_ptr) => {
let ptr = mem.read_ptr(ref_ptr)?;
let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?)?;
Ok((ptr, vtable))
Ok((ptr, vtable.to_ptr()?))
}
ByValPair(ptr, vtable) => Ok((ptr.to_ptr()?, vtable.to_ptr()?)),
ByValPair(ptr, vtable) => Ok((ptr, vtable.to_ptr()?)),
_ => bug!("expected ptr and vtable, got {:?}", self),
}
}
pub(super) fn expect_slice(&self, mem: &Memory<'a, 'tcx>) -> EvalResult<'tcx, (Pointer, u64)> {
pub(super) fn expect_slice(&self, mem: &Memory<'a, 'tcx>) -> EvalResult<'tcx, (PrimVal, u64)> {
use self::Value::*;
match *self {
ByRef(ref_ptr) => {
@ -111,7 +112,7 @@ impl<'a, 'tcx: 'a> Value {
ByValPair(ptr, val) => {
let len = val.to_u128()?;
assert_eq!(len as u64 as u128, len);
Ok((ptr.to_ptr()?, len as u64))
Ok((ptr, len as u64))
},
_ => unimplemented!(),
}
@ -146,14 +147,14 @@ impl<'tcx> PrimVal {
pub fn to_bytes(self) -> EvalResult<'tcx, u128> {
match self {
PrimVal::Bytes(b) => Ok(b),
PrimVal::Ptr(p) => p.to_int().map(|b| b as u128),
PrimVal::Ptr(_) => Err(EvalError::ReadPointerAsBytes),
PrimVal::Undef => Err(EvalError::ReadUndefBytes),
}
}
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
match self {
PrimVal::Bytes(b) => Ok(Pointer::from_int(b as u64)),
PrimVal::Bytes(_) => Err(EvalError::ReadBytesAsPointer),
PrimVal::Ptr(p) => Ok(p),
PrimVal::Undef => Err(EvalError::ReadUndefBytes),
}
@ -203,6 +204,67 @@ impl<'tcx> PrimVal {
_ => Err(EvalError::InvalidBool),
}
}
pub fn signed_offset(self, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
match self {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(PrimVal::Bytes(signed_offset(b as u64, i, layout)? as u128))
},
PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(PrimVal::Ptr),
PrimVal::Undef => Err(EvalError::ReadUndefBytes),
}
}
pub fn offset(self, i: u64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
match self {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(PrimVal::Bytes(offset(b as u64, i, layout)? as u128))
},
PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(PrimVal::Ptr),
PrimVal::Undef => Err(EvalError::ReadUndefBytes),
}
}
pub fn wrapping_signed_offset(self, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
match self {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(PrimVal::Bytes(wrapping_signed_offset(b as u64, i, layout) as u128))
},
PrimVal::Ptr(ptr) => Ok(PrimVal::Ptr(ptr.wrapping_signed_offset(i, layout))),
PrimVal::Undef => Err(EvalError::ReadUndefBytes),
}
}
}
pub fn signed_offset<'tcx>(val: u64, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, u64> {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// trickery to ensure that i64::min_value() works fine
// this formula only works for true negative values, it panics for zero!
let n = u64::max_value() - (i as u64) + 1;
val.checked_sub(n).ok_or(EvalError::OverflowingMath)
} else {
offset(val, i as u64, layout)
}
}
pub fn offset<'tcx>(val: u64, i: u64, layout: &TargetDataLayout) -> EvalResult<'tcx, u64> {
if let Some(res) = val.checked_add(i) {
if res as u128 >= (1u128 << layout.pointer_size.bits()) {
Err(EvalError::OverflowingMath)
} else {
Ok(res)
}
} else {
Err(EvalError::OverflowingMath)
}
}
pub fn wrapping_signed_offset<'tcx>(val: u64, i: i64, layout: &TargetDataLayout) -> u64 {
(val.wrapping_add(i as u64) as u128 % (1u128 << layout.pointer_size.bits())) as u64
}
impl PrimValKind {

View File

@ -4,5 +4,5 @@ fn main() {
std::mem::transmute::<&usize, &fn(i32)>(&b)
};
(*g)(42) //~ ERROR tried to use an integer pointer or a dangling pointer as a function pointer
(*g)(42) //~ ERROR a memory access tried to interpret some bytes as a pointer
}

View File

@ -3,5 +3,5 @@ fn main() {
std::mem::transmute::<usize, fn(i32)>(42)
};
g(42) //~ ERROR tried to use an integer pointer or a dangling pointer as a function pointer
g(42) //~ ERROR a memory access tried to interpret some bytes as a pointer
}

View File

@ -1,4 +1,4 @@
fn main() {
let x: i32 = unsafe { *std::ptr::null() }; //~ ERROR: tried to access memory through an invalid pointer
let x: i32 = unsafe { *std::ptr::null() }; //~ ERROR: a memory access tried to interpret some bytes as a pointer
panic!("this should never print: {}", x);
}

View File

@ -1,5 +1,5 @@
fn main() {
let p = 42 as *const i32;
let x = unsafe { *p }; //~ ERROR: tried to access memory through an invalid pointer
let x = unsafe { *p }; //~ ERROR: a memory access tried to interpret some bytes as a pointer
panic!("this should never print: {}", x);
}

View File

@ -3,19 +3,24 @@ extern crate compiletest_rs as compiletest;
use std::path::{PathBuf, Path};
use std::io::Write;
fn compile_fail(sysroot: &Path) {
let flags = format!("--sysroot {} -Dwarnings", sysroot.to_str().expect("non utf8 path"));
for_all_targets(sysroot, |target| {
let mut config = compiletest::default_config();
config.host_rustcflags = Some(flags.clone());
config.mode = "compile-fail".parse().expect("Invalid mode");
config.run_lib_path = Path::new(sysroot).join("lib").join("rustlib").join(&target).join("lib");
config.rustc_path = "target/debug/miri".into();
config.src_base = PathBuf::from("tests/compile-fail".to_string());
config.target = target.to_owned();
config.target_rustcflags = Some(flags.clone());
compiletest::run_tests(&config);
});
fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, fullmir: bool) {
let mut config = compiletest::default_config();
config.mode = "compile-fail".parse().expect("Invalid mode");
config.rustc_path = "target/debug/miri".into();
if fullmir {
if host != target {
// skip fullmir on nonhost
return;
}
let sysroot = Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST");
config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap()));
config.src_base = PathBuf::from(path.to_string());
} else {
config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap()));
config.src_base = PathBuf::from(path.to_string());
}
config.target = target.to_owned();
compiletest::run_tests(&config);
}
fn run_pass() {
@ -27,13 +32,21 @@ fn run_pass() {
compiletest::run_tests(&config);
}
fn miri_pass(path: &str, target: &str, host: &str) {
fn miri_pass(path: &str, target: &str, host: &str, fullmir: bool) {
let mut config = compiletest::default_config();
config.mode = "mir-opt".parse().expect("Invalid mode");
config.src_base = PathBuf::from(path);
config.target = target.to_owned();
config.host = host.to_owned();
config.rustc_path = PathBuf::from("target/debug/miri");
if fullmir {
if host != target {
// skip fullmir on nonhost
return;
}
let sysroot = Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST");
config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap()));
}
// don't actually execute the final binary, it might be for other targets and we only care
// about running miri, not the binary.
config.runtool = Some("echo \"\" || ".to_owned());
@ -116,6 +129,7 @@ fn compile_test() {
let sysroot = libs.join("rustlib").join(&host).join("lib");
let paths = std::env::join_paths(&[libs, sysroot]).unwrap();
cmd.env(compiletest::procsrv::dylib_env_var(), paths);
cmd.env("MIRI_SYSROOT", Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST"));
match cmd.output() {
Ok(ref output) if output.status.success() => {
@ -197,9 +211,11 @@ fn compile_test() {
} else {
run_pass();
for_all_targets(sysroot, |target| {
miri_pass("tests/run-pass", &target, host);
miri_pass("tests/run-pass", &target, host, false);
compile_fail(sysroot, "tests/compile-fail", &target, host, false);
});
compile_fail(sysroot);
miri_pass("tests/run-pass-fullmir", host, host, true);
compile_fail(sysroot, "tests/compile-fail-fullmir", host, host, true);
}
}

View File

@ -0,0 +1,141 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(never_type)]
#![allow(unreachable_code)]
#[allow(unused)]
fn never_returns() {
loop {
break loop {};
}
}
// Exercises `break`-with-value in `loop` expressions in many shapes.
pub fn main() {
    // A labeled loop broken with a value. The `else` arm never runs but must
    // still type-check: `break 'outer panic!()` exits through two nested
    // loops, which therefore have type `!`.
    let value = 'outer: loop {
        if 1 == 1 {
            break 13;
        } else {
            let _never: ! = loop {
                break loop {
                    break 'outer panic!();
                }
            };
        }
    };
    assert_eq!(value, 13);
    let x = [1, 3u32, 5];
    let y = [17];
    let z = [];
    // All three `break` operands coerce to the single loop type `&[u32]`,
    // even though the arrays have different lengths.
    let coerced: &[_] = loop {
        match 2 {
            1 => break &x,
            2 => break &y,
            3 => break &z,
            _ => (),
        }
    };
    assert_eq!(coerced, &[17u32]);
    // Inference unifies `Default::default()` with `[13, 14]`, so the loop
    // (and `trait_unified`) has type `[i32; 2]` — hence the `[0, 0]` below.
    let trait_unified = loop {
        break if true {
            break Default::default()
        } else {
            break [13, 14]
        };
    };
    assert_eq!(trait_unified, [0, 0]);
    // Same unification the other way round: `[String; 1]` determines the
    // type of the `Default::default()` arm that is actually taken.
    let trait_unified_2 = loop {
        if false {
            break [String::from("Hello")]
        } else {
            break Default::default()
        };
    };
    assert_eq!(trait_unified_2, [""]);
    // `.into()` gets its target type from the other arm's `[String; 1]`;
    // note only the outer `break` actually leaves the loop here.
    let trait_unified_3 = loop {
        break if false {
            break [String::from("Hello")]
        } else {
            ["Yes".into()]
        };
    };
    assert_eq!(trait_unified_3, ["Yes"]);
    // A bare `break` is `break ()`, so mixing it with `break <expr>` forces
    // the loop type to `()` (checked by the asserts below).
    let regular_break = loop {
        if true {
            break;
        } else {
            break break Default::default();
        }
    };
    assert_eq!(regular_break, ());
    let regular_break_2 = loop {
        if true {
            break Default::default();
        } else {
            break;
        }
    };
    assert_eq!(regular_break_2, ());
    let regular_break_3 = loop {
        break if true {
            Default::default()
        } else {
            break;
        }
    };
    assert_eq!(regular_break_3, ());
    // Statements after `break` are unreachable but must still compile
    // (hence `#![allow(unreachable_code)]` at the top of the file).
    let regular_break_4 = loop {
        break ();
        break;
    };
    assert_eq!(regular_break_4, ());
    let regular_break_5 = loop {
        break;
        break ();
    };
    assert_eq!(regular_break_5, ());
    // A labeled break can carry a value of a different type than the inner
    // loop it jumps out of (`&str` here vs. the inner loop's `u32`).
    let nested_break_value = 'outer2: loop {
        let _a: u32 = 'inner: loop {
            if true {
                break 'outer2 "hello";
            } else {
                break 'inner 17;
            }
        };
        panic!();
    };
    assert_eq!(nested_break_value, "hello");
    // `break` is allowed inside a `while` *condition*; it exits the `while`
    // itself before the body can ever run.
    let break_from_while_cond = loop {
        'inner_loop: while break 'inner_loop {
            panic!();
        }
        break 123;
    };
    assert_eq!(break_from_while_cond, 123);
    // A labeled break in a `while` condition can even target an enclosing
    // `loop`, carrying a value out past the `while`.
    let break_from_while_to_outer = 'outer_loop: loop {
        while break 'outer_loop 567 {
            panic!("from_inner");
        }
        panic!("from outer");
    };
    assert_eq!(break_from_while_to_outer, 567);
}

View File

@ -0,0 +1,58 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C debug-assertions
use std::slice;
/// Advances an iterator over `v` past (up to) its first five elements and
/// returns the remainder of the slice via `Iter::as_slice`.
fn foo<T>(v: &[T]) -> Option<&[T]> {
    let mut rest = v.iter();
    // `nth(4)` consumes elements 0..=4 — exactly like five `next()` calls;
    // if fewer than five exist the iterator is simply exhausted.
    let _ = rest.nth(4);
    Some(rest.as_slice())
}
/// Mutable counterpart of `foo`: skips (up to) five elements and returns the
/// rest of the slice mutably via `IterMut::into_slice`.
fn foo_mut<T>(v: &mut [T]) -> Option<&mut [T]> {
    let mut rest = v.iter_mut();
    // Drain at most five items; equivalent to five discarded `next()` calls.
    for _ in rest.by_ref().take(5) {}
    Some(rest.into_slice())
}
pub fn main() {
    // In a slice of zero-size elements the pointer is meaningless.
    // Ensure iteration still works even if the pointer is at the end of the address space.
    // SAFETY(test): the element type is the ZST `()`, so no memory behind the
    // fabricated `-5isize` address is ever actually read; only the iterator's
    // pointer bookkeeping is exercised.
    let slice: &[()] = unsafe { slice::from_raw_parts(-5isize as *const (), 10) };
    assert_eq!(slice.len(), 10);
    assert_eq!(slice.iter().count(), 10);
    // .nth() on the iterator should also behave correctly
    let mut it = slice.iter();
    assert!(it.nth(5).is_some());
    assert_eq!(it.count(), 4);
    // Converting Iter to a slice should never have a null pointer
    assert!(foo(slice).is_some());
    // Test mutable iterators as well
    // SAFETY(test): same reasoning as above — `()` elements are never
    // dereferenced, and no other reference aliases this fabricated slice.
    let slice: &mut [()] = unsafe { slice::from_raw_parts_mut(-5isize as *mut (), 10) };
    assert_eq!(slice.len(), 10);
    assert_eq!(slice.iter_mut().count(), 10);
    {
        let mut it = slice.iter_mut();
        assert!(it.nth(5).is_some());
        assert_eq!(it.count(), 4);
    }
    assert!(foo_mut(slice).is_some())
}

View File

@ -0,0 +1,81 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(untagged_unions)]
#![allow(unions_with_drop_fields)]
// A pair stored in declaration order; `#[repr(C)]` pins the layout.
#[repr(C)]
struct Pair<T, U>(T, U);
// Three consecutive `T`s; `#[repr(C)]` guarantees no field reordering.
#[repr(C)]
struct Triple<T>(T, T, T);
// Union overlaying a `Pair<A, A>` with a single `B`, used by `check` below to
// observe how writing part of one field shows up when reading the other.
#[repr(C)]
union U<A, B> {
    a: Pair<A, A>,
    b: B,
}
// Union overlaying a single `A` with a single `B`.
#[repr(C)]
union W<A, B> {
    a: A,
    b: B,
}
#[cfg(target_endian = "little")]
unsafe fn check() {
    // On little-endian targets the first union field overlays the
    // least-significant bytes of the wider integer it shares storage with.
    // Writing the `u8` half `a.0` replaces the low byte of the `u16` `b`.
    let mut u = U::<u8, u16> { b: 0xDE_DE };
    u.a.0 = 0xBE;
    assert_eq!(u.b, 0xDE_BE);
    // Same pattern one size up: the low `u16` of a `u32`.
    let mut u = U::<u16, u32> { b: 0xDEAD_DEAD };
    u.a.0 = 0xBEEF;
    assert_eq!(u.b, 0xDEAD_BEEF);
    // And the low `u32` of a `u64`.
    let mut u = U::<u32, u64> { b: 0xDEADBEEF_DEADBEEF };
    u.a.0 = 0xBAADF00D;
    assert_eq!(u.b, 0xDEADBEEF_BAADF00D);
    // `Triple<u8>` zeroes the three low bytes of the `u32`; only the high
    // byte 0xDE survives.
    let mut w = W::<Pair<Triple<u8>, u8>, u32> { b: 0xDEAD_DEAD };
    w.a.0 = Triple(0, 0, 0);
    assert_eq!(w.b, 0xDE00_0000);
    // Here the `Triple<u8>` sits after a leading `u8`, so bytes 1..=3 are
    // zeroed and only the lowest byte 0xAD survives.
    let mut w = W::<Pair<u8, Triple<u8>>, u32> { b: 0xDEAD_DEAD };
    w.a.1 = Triple(0, 0, 0);
    assert_eq!(w.b, 0x0000_00AD);
}
#[cfg(target_endian = "big")]
unsafe fn check() {
    // Big-endian mirror of the little-endian `check`: the first union field
    // overlays the *most*-significant bytes of the wider integer.
    // Writing the `u8` half `a.0` replaces the high byte of the `u16` `b`.
    let mut u = U::<u8, u16> { b: 0xDE_DE };
    u.a.0 = 0xBE;
    assert_eq!(u.b, 0xBE_DE);
    // Same pattern one size up: the high `u16` of a `u32`.
    let mut u = U::<u16, u32> { b: 0xDEAD_DEAD };
    u.a.0 = 0xBEEF;
    assert_eq!(u.b, 0xBEEF_DEAD);
    // And the high `u32` of a `u64`.
    let mut u = U::<u32, u64> { b: 0xDEADBEEF_DEADBEEF };
    u.a.0 = 0xBAADF00D;
    assert_eq!(u.b, 0xBAADF00D_DEADBEEF);
    // `Triple<u8>` zeroes the three *leading* (most-significant) bytes, so
    // only the trailing byte 0xAD survives.
    let mut w = W::<Pair<Triple<u8>, u8>, u32> { b: 0xDEAD_DEAD };
    w.a.0 = Triple(0, 0, 0);
    assert_eq!(w.b, 0x0000_00AD);
    // With the `Triple<u8>` after a leading `u8`, bytes 1..=3 are zeroed and
    // only the leading byte 0xDE survives.
    let mut w = W::<Pair<u8, Triple<u8>>, u32> { b: 0xDEAD_DEAD };
    w.a.1 = Triple(0, 0, 0);
    assert_eq!(w.b, 0xDE00_0000);
}
fn main() {
    // SAFETY: `check` only writes and reads union fields of local variables
    // whose variants are plain integers (u8..u64) and tuples thereof, for
    // which every byte pattern is a valid value.
    unsafe {
        check();
    }
}

View File

@ -0,0 +1,28 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::collections::BinaryHeap;
use std::iter::Iterator;
/// Regression test: `BinaryHeap` must handle zero-sized element types.
fn main() {
    const N: usize = 8;
    for len in 0..N {
        // A heap of `()` — every element is zero-sized.
        let mut heap = BinaryHeap::with_capacity(len);
        assert_eq!(heap.len(), 0);
        assert!(heap.capacity() >= len);
        // Fill with `len` unit values via `extend` rather than a push loop.
        heap.extend((0..len).map(|_| ()));
        assert_eq!(heap.len(), len);
        assert_eq!(heap.iter().count(), len);
        heap.clear();
    }
}