Merge pull request #247 from RalfJung/packed

Re-do packed memory accesses

commit 56d4de303f
@@ -17,7 +17,7 @@ use syntax::abi::Abi;
 use error::{EvalError, EvalResult};
 use lvalue::{Global, GlobalId, Lvalue, LvalueExtra};
-use memory::{Memory, MemoryPointer, TlsKey};
+use memory::{Memory, MemoryPointer, TlsKey, HasMemory};
 use operator;
 use value::{PrimVal, PrimValKind, Value, Pointer};
@@ -352,7 +352,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             .expect("global should have been cached (static)");
         match global_value.value {
             // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
-            Value::ByRef(ptr) => self.memory.mark_static_initalized(ptr.to_ptr()?.alloc_id, mutable)?,
+            Value::ByRef(ptr, _aligned) =>
+                // Alignment does not matter for this call
+                self.memory.mark_static_initalized(ptr.to_ptr()?.alloc_id, mutable)?,
             Value::ByVal(val) => if let PrimVal::Ptr(ptr) = val {
                 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
             },
@@ -408,7 +410,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
     }

     pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
-        if let Some(Value::ByRef(ptr)) = local {
+        if let Some(Value::ByRef(ptr, _aligned)) = local {
             trace!("deallocating local");
             let ptr = ptr.to_ptr()?;
             self.memory.dump_alloc(ptr.alloc_id);
@@ -446,6 +448,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         let dest = Lvalue::Ptr {
             ptr: dest_ptr.into(),
             extra: LvalueExtra::DowncastVariant(variant_idx),
+            aligned: true,
         };

         self.assign_fields(dest, dest_ty, operands)
@@ -528,15 +531,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         self.inc_step_counter_and_check_limit(operands.len() as u64)?;
         use rustc::ty::layout::Layout::*;
         match *dest_layout {
-            Univariant { ref variant, .. } => {
-                if variant.packed {
-                    let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
-                    self.memory.mark_packed(ptr, variant.stride().bytes());
-                }
-                self.assign_fields(dest, dest_ty, operands)?;
-            }
-
-            Array { .. } => {
+            Univariant { .. } | Array { .. } => {
                 self.assign_fields(dest, dest_ty, operands)?;
             }

@@ -547,10 +542,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     .expect("broken mir: Adt variant id invalid")
                     .to_u128_unchecked();
                 let discr_size = discr.size().bytes();
-                if variants[variant].packed {
-                    let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
-                    self.memory.mark_packed(ptr, variants[variant].stride().bytes());
-                }

                 self.assign_discr_and_fields(
                     dest,
@@ -587,12 +578,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 }
             }

-            StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield, .. } => {
+            StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
                 if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
-                    if nonnull.packed {
-                        let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
-                        self.memory.mark_packed(ptr, nonnull.stride().bytes());
-                    }
                     if nndiscr == variant as u64 {
                         self.assign_fields(dest, dest_ty, operands)?;
                     } else {
@@ -682,7 +669,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             Ref(_, _, ref lvalue) => {
                 let src = self.eval_lvalue(lvalue)?;
-                let (ptr, extra) = self.force_allocation(src)?.to_ptr_and_extra();
+                // We ignore the alignment of the lvalue here -- this rvalue produces sth. of type &, which must always be aligned.
+                let (ptr, extra, _aligned) = self.force_allocation(src)?.to_ptr_extra_aligned();
                 let ty = self.lvalue_ty(lvalue);

                 let val = match extra {
@@ -695,7 +683,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

                 // Check alignment and non-NULLness.
                 let (_, align) = self.size_and_align_of_dst(ty, val)?;
-                self.memory.check_align(ptr, align, 0)?;
+                self.memory.check_align(ptr, align)?;

                 self.write_value(val, dest, dest_ty)?;
             }
@@ -737,7 +725,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let src_ty = self.operand_ty(operand);
                 if self.type_is_fat_ptr(src_ty) {
                     match (src, self.type_is_fat_ptr(dest_ty)) {
-                        (Value::ByRef(_), _) |
+                        (Value::ByRef(..), _) |
                         (Value::ByValPair(..), true) => {
                             self.write_value(src, dest, dest_ty)?;
                         },
@@ -1018,15 +1006,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 // -1 since we don't store the return value
                 match self.stack[frame].locals[local.index() - 1] {
                     None => return Err(EvalError::DeadLocal),
-                    Some(Value::ByRef(ptr)) => {
-                        Lvalue::from_primval_ptr(ptr)
+                    Some(Value::ByRef(ptr, aligned)) => {
+                        Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None }
                     },
                     Some(val) => {
                         let ty = self.stack[frame].mir.local_decls[local].ty;
                         let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
                         let substs = self.stack[frame].instance.substs;
                         let ptr = self.alloc_ptr_with_substs(ty, substs)?;
-                        self.stack[frame].locals[local.index() - 1] = Some(Value::ByRef(ptr.into())); // it stays live
+                        self.stack[frame].locals[local.index() - 1] = Some(Value::by_ref(ptr.into())); // it stays live
                         self.write_value_to_ptr(val, ptr.into(), ty)?;
                         Lvalue::from_ptr(ptr)
                     }
@@ -1036,7 +1024,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             Lvalue::Global(cid) => {
                 let global_val = *self.globals.get(&cid).expect("global not cached");
                 match global_val.value {
-                    Value::ByRef(ptr) => Lvalue::from_primval_ptr(ptr),
+                    Value::ByRef(ptr, aligned) =>
+                        Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None },
                     _ => {
                         let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.instance.substs)?;
                         self.memory.mark_static(ptr.alloc_id);
@@ -1047,7 +1036,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                         }
                         let lval = self.globals.get_mut(&cid).expect("already checked");
                         *lval = Global {
-                            value: Value::ByRef(ptr.into()),
+                            value: Value::by_ref(ptr.into()),
                             .. global_val
                         };
                         Lvalue::from_ptr(ptr)
@@ -1061,14 +1050,16 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
     /// ensures this Value is not a ByRef
     pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
         match value {
-            Value::ByRef(ptr) => self.read_value(ptr, ty),
+            Value::ByRef(ptr, aligned) => {
+                self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
+            }
             other => Ok(other),
         }
     }

     pub(super) fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
         match self.follow_by_ref_value(value, ty)? {
-            Value::ByRef(_) => bug!("follow_by_ref_value can't result in `ByRef`"),
+            Value::ByRef(..) => bug!("follow_by_ref_value can't result in `ByRef`"),

             Value::ByVal(primval) => {
                 self.ensure_valid_value(primval, ty)?;
@@ -1131,9 +1122,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 self.write_value_possibly_by_val(src_val, write_dest, dest.value, dest_ty)
             },

-            Lvalue::Ptr { ptr, extra } => {
+            Lvalue::Ptr { ptr, extra, aligned } => {
                 assert_eq!(extra, LvalueExtra::None);
-                self.write_value_to_ptr(src_val, ptr, dest_ty)
+                self.write_maybe_aligned(aligned,
+                    |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
             }

             Lvalue::Local { frame, local } => {
@@ -1156,7 +1148,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         old_dest_val: Value,
         dest_ty: Ty<'tcx>,
     ) -> EvalResult<'tcx> {
-        if let Value::ByRef(dest_ptr) = old_dest_val {
+        if let Value::ByRef(dest_ptr, aligned) = old_dest_val {
             // If the value is already `ByRef` (that is, backed by an `Allocation`),
             // then we must write the new value into this allocation, because there may be
             // other pointers into the allocation. These other pointers are logically
@@ -1164,9 +1156,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             //
             // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
             // knew for certain that there were no outstanding pointers to this allocation.
-            self.write_value_to_ptr(src_val, dest_ptr, dest_ty)?;
+            self.write_maybe_aligned(aligned,
+                |ectx| ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty))?;

-        } else if let Value::ByRef(src_ptr) = src_val {
+        } else if let Value::ByRef(src_ptr, aligned) = src_val {
             // If the value is not `ByRef`, then we know there are no pointers to it
             // and we can simply overwrite the `Value` in the locals array directly.
             //
@@ -1178,13 +1171,16 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             // It is a valid optimization to attempt reading a primitive value out of the
             // source and write that into the destination without making an allocation, so
             // we do so here.
-            if let Ok(Some(src_val)) = self.try_read_value(src_ptr, dest_ty) {
-                write_dest(self, src_val)?;
-            } else {
-                let dest_ptr = self.alloc_ptr(dest_ty)?.into();
-                self.copy(src_ptr, dest_ptr, dest_ty)?;
-                write_dest(self, Value::ByRef(dest_ptr))?;
-            }
+            self.read_maybe_aligned(aligned, |ectx| {
+                if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
+                    write_dest(ectx, src_val)?;
+                } else {
+                    let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
+                    ectx.copy(src_ptr, dest_ptr, dest_ty)?;
+                    write_dest(ectx, Value::by_ref(dest_ptr))?;
+                }
+                Ok(())
+            })?;

         } else {
             // Finally, we have the simple case where neither source nor destination are
@@ -1201,7 +1197,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         dest_ty: Ty<'tcx>,
     ) -> EvalResult<'tcx> {
         match value {
-            Value::ByRef(ptr) => self.copy(ptr, dest, dest_ty),
+            Value::ByRef(ptr, aligned) => {
+                self.read_maybe_aligned(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
+            },
             Value::ByVal(primval) => {
                 let size = self.type_size(dest_ty)?.expect("dest type must be sized");
                 self.memory.write_primval(dest, primval, size)
@@ -1464,7 +1462,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

         match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
             (&ty::TyArray(_, length), &ty::TySlice(_)) => {
-                let ptr = src.read_ptr(&self.memory)?;
+                let ptr = src.into_ptr(&mut self.memory)?;
                 // u64 cast is from usize to u64, which is always good
                 self.write_value(ptr.to_value_with_len(length as u64), dest, dest_ty)
             }
@@ -1478,7 +1476,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let trait_ref = data.principal().unwrap().with_self_ty(self.tcx, src_pointee_ty);
                 let trait_ref = self.tcx.erase_regions(&trait_ref);
                 let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
-                let ptr = src.read_ptr(&self.memory)?;
+                let ptr = src.into_ptr(&mut self.memory)?;
                 self.write_value(ptr.to_value_with_vtable(vtable), dest, dest_ty)
             },

@@ -1521,8 +1519,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 //let src = adt::MaybeSizedValue::sized(src);
                 //let dst = adt::MaybeSizedValue::sized(dst);
                 let src_ptr = match src {
-                    Value::ByRef(ptr) => ptr,
-                    _ => bug!("expected pointer, got {:?}", src),
+                    Value::ByRef(ptr, true) => ptr,
+                    // TODO: Is it possible for unaligned pointers to occur here?
+                    _ => bug!("expected aligned pointer, got {:?}", src),
                 };

                 // FIXME(solson)
@@ -1541,7 +1540,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     if src_fty == dst_fty {
                         self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
                     } else {
-                        self.unsize_into(Value::ByRef(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
+                        self.unsize_into(Value::by_ref(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
                     }
                 }
                 Ok(())
@@ -1568,9 +1567,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             Err(err) => {
                 panic!("Failed to access local: {:?}", err);
             }
-            Ok(Value::ByRef(ptr)) => match ptr.into_inner_primval() {
+            Ok(Value::ByRef(ptr, aligned)) => match ptr.into_inner_primval() {
                 PrimVal::Ptr(ptr) => {
-                    write!(msg, " by ref:").unwrap();
+                    write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }).unwrap();
                     allocs.push(ptr.alloc_id);
                 },
                 ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),

@@ -4,7 +4,7 @@ use rustc::ty::{self, Ty};
 use rustc_data_structures::indexed_vec::Idx;

 use error::{EvalError, EvalResult};
-use eval_context::{EvalContext};
+use eval_context::EvalContext;
 use memory::MemoryPointer;
 use value::{PrimVal, Value, Pointer};
@@ -17,6 +17,8 @@ pub enum Lvalue<'tcx> {
         /// before ever being dereferenced.
         ptr: Pointer,
         extra: LvalueExtra,
+        /// Remember whether this lvalue is *supposed* to be aligned.
+        aligned: bool,
     },

     /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
@@ -68,23 +70,25 @@ impl<'tcx> Lvalue<'tcx> {
     }

     pub(crate) fn from_primval_ptr(ptr: Pointer) -> Self {
-        Lvalue::Ptr { ptr, extra: LvalueExtra::None }
+        Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true }
     }

     pub(crate) fn from_ptr(ptr: MemoryPointer) -> Self {
         Self::from_primval_ptr(ptr.into())
     }

-    pub(super) fn to_ptr_and_extra(self) -> (Pointer, LvalueExtra) {
+    pub(super) fn to_ptr_extra_aligned(self) -> (Pointer, LvalueExtra, bool) {
         match self {
-            Lvalue::Ptr { ptr, extra } => (ptr, extra),
+            Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
             _ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
         }
     }

     pub(super) fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
-        let (ptr, extra) = self.to_ptr_and_extra();
+        let (ptr, extra, _aligned) = self.to_ptr_extra_aligned();
+        // At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
+        // and no matter where it came from, it now must be aligned.
         assert_eq!(extra, LvalueExtra::None);
         ptr.to_ptr()
     }
@@ -175,6 +179,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         }
     }

+    /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses.
     pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
         let ty = self.lvalue_ty(lvalue);
         // Shortcut for things like accessing a fat pointer's field,
@@ -190,9 +195,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         }

         match lvalue {
-            Lvalue::Ptr { ptr, extra } => {
+            Lvalue::Ptr { ptr, extra, aligned } => {
                 assert_eq!(extra, LvalueExtra::None);
-                Ok(Value::ByRef(ptr))
+                Ok(Value::ByRef(ptr, aligned))
             }
             Lvalue::Local { frame, local } => {
                 self.stack[frame].get_local(local)
@@ -239,7 +244,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             },

             General { ref variants, .. } => {
-                let (_, base_extra) = base.to_ptr_and_extra();
+                let (_, base_extra, _) = base.to_ptr_extra_aligned();
                 if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
                     // +1 for the discriminant, which is field 0
                     (variants[variant_idx].offsets[field_index + 1], variants[variant_idx].packed)
@@ -289,17 +294,17 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         };

         // Do not allocate in trivial cases
-        let (base_ptr, base_extra) = match base {
-            Lvalue::Ptr { ptr, extra } => (ptr, extra),
+        let (base_ptr, base_extra, aligned) = match base {
+            Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
             Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? {
                 // in case the type has a single field, just return the value
                 Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
                     assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
                     return Ok(base);
                 },
-                Value::ByRef(_) |
+                Value::ByRef(..) |
                 Value::ByValPair(..) |
-                Value::ByVal(_) => self.force_allocation(base)?.to_ptr_and_extra(),
+                Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
             },
             Lvalue::Global(cid) => match self.globals.get(&cid).expect("uncached global").value {
                 // in case the type has a single field, just return the value
@@ -307,9 +312,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
                     return Ok(base);
                 },
-                Value::ByRef(_) |
+                Value::ByRef(..) |
                 Value::ByValPair(..) |
-                Value::ByVal(_) => self.force_allocation(base)?.to_ptr_and_extra(),
+                Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
             },
         };

@@ -325,11 +330,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

         let field_ty = self.monomorphize(field_ty, self.substs());

-        if packed {
-            let size = self.type_size(field_ty)?.expect("packed struct must be sized");
-            self.memory.mark_packed(ptr.to_ptr()?, size);
-        }
-
         let extra = if self.type_is_sized(field_ty) {
             LvalueExtra::None
         } else {
@@ -343,7 +343,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             base_extra
         };

-        Ok(Lvalue::Ptr { ptr, extra })
+        Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
     }

     fn eval_lvalue_projection(
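
The line above carries the heart of the new scheme: a projected field is only considered aligned when the base lvalue was aligned and the containing struct is not packed. A standalone sketch of that propagation rule (illustrative names, not miri's actual API):

// Illustrative only; miri threads this through `Lvalue::Ptr`, not a free function.
fn field_is_aligned(base_aligned: bool, struct_is_packed: bool) -> bool {
    // A field of a #[repr(packed)] struct is only byte-aligned, and
    // unalignedness is inherited from the base lvalue.
    base_aligned && !struct_is_packed
}

fn main() {
    assert_eq!(field_is_aligned(true, false), true);   // ordinary field access
    assert_eq!(field_is_aligned(true, true), false);   // field of a packed struct
    assert_eq!(field_is_aligned(false, false), false); // base was already unaligned
}
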
@@ -351,7 +351,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         proj: &mir::LvalueProjection<'tcx>,
     ) -> EvalResult<'tcx, Lvalue<'tcx>> {
         use rustc::mir::ProjectionElem::*;
-        let (ptr, extra) = match proj.elem {
+        let (ptr, extra, aligned) = match proj.elem {
             Field(field, field_ty) => {
                 let base = self.eval_lvalue(&proj.base)?;
                 let base_ty = self.lvalue_ty(&proj.base);
@@ -364,7 +364,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let base_layout = self.type_layout(base_ty)?;
                 // FIXME(solson)
                 let base = self.force_allocation(base)?;
-                let (base_ptr, base_extra) = base.to_ptr_and_extra();
+                let (base_ptr, base_extra, aligned) = base.to_ptr_extra_aligned();

                 use rustc::ty::layout::Layout::*;
                 let extra = match *base_layout {
@@ -372,7 +372,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra,
                     _ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
                 };
-                (base_ptr, extra)
+                (base_ptr, extra, aligned)
             }

             Deref => {
@@ -390,14 +390,14 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

                 match self.tcx.struct_tail(pointee_type).sty {
                     ty::TyDynamic(..) => {
-                        let (ptr, vtable) = val.expect_ptr_vtable_pair(&self.memory)?;
-                        (ptr, LvalueExtra::Vtable(vtable))
+                        let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?;
+                        (ptr, LvalueExtra::Vtable(vtable), true)
                     },
                     ty::TyStr | ty::TySlice(_) => {
-                        let (ptr, len) = val.expect_slice(&self.memory)?;
-                        (ptr, LvalueExtra::Length(len))
+                        let (ptr, len) = val.into_slice(&mut self.memory)?;
+                        (ptr, LvalueExtra::Length(len), true)
                     },
-                    _ => (val.read_ptr(&self.memory)?, LvalueExtra::None),
+                    _ => (val.into_ptr(&mut self.memory)?, LvalueExtra::None, true),
                 }
             }

@@ -406,7 +406,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let base_ty = self.lvalue_ty(&proj.base);
                 // FIXME(solson)
                 let base = self.force_allocation(base)?;
-                let (base_ptr, _) = base.to_ptr_and_extra();
+                let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();

                 let (elem_ty, len) = base.elem_ty_and_len(base_ty);
                 let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
@@ -415,7 +415,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let n = self.value_to_primval(n_ptr, usize)?.to_u64()?;
                 assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
                 let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
-                (ptr, LvalueExtra::None)
+                (ptr, LvalueExtra::None, aligned)
             }

             ConstantIndex { offset, min_length, from_end } => {
@@ -423,7 +423,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let base_ty = self.lvalue_ty(&proj.base);
                 // FIXME(solson)
                 let base = self.force_allocation(base)?;
-                let (base_ptr, _) = base.to_ptr_and_extra();
+                let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();

                 let (elem_ty, n) = base.elem_ty_and_len(base_ty);
                 let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized");
@@ -436,7 +436,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 };

                 let ptr = base_ptr.offset(index * elem_size, self.memory.layout)?;
-                (ptr, LvalueExtra::None)
+                (ptr, LvalueExtra::None, aligned)
             }

             Subslice { from, to } => {
@@ -444,18 +444,18 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let base_ty = self.lvalue_ty(&proj.base);
                 // FIXME(solson)
                 let base = self.force_allocation(base)?;
-                let (base_ptr, _) = base.to_ptr_and_extra();
+                let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();

                 let (elem_ty, n) = base.elem_ty_and_len(base_ty);
                 let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
                 assert!(u64::from(from) <= n - u64::from(to));
                 let ptr = base_ptr.offset(u64::from(from) * elem_size, self.memory.layout)?;
                 let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
-                (ptr, extra)
+                (ptr, extra, aligned)
             }
         };

-        Ok(Lvalue::Ptr { ptr, extra })
+        Ok(Lvalue::Ptr { ptr, extra, aligned })
     }

     pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {

src/memory.rs

@@ -1,5 +1,5 @@
 use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
-use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque, BTreeSet};
+use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
 use std::{fmt, iter, ptr, mem, io};

 use rustc::ty;
@@ -7,6 +7,7 @@ use rustc::ty::layout::{self, TargetDataLayout};

 use error::{EvalError, EvalResult};
 use value::{PrimVal, self, Pointer};
+use eval_context::EvalContext;

 ////////////////////////////////////////////////////////////////////////////////
 // Allocations and pointers
@@ -124,20 +125,6 @@ pub struct Memory<'a, 'tcx> {
     /// Target machine data layout to emulate.
     pub layout: &'a TargetDataLayout,

-    /// List of memory regions containing packed structures.
-    ///
-    /// We mark memory as "packed" or "unaligned" for a single statement, and clear the marking
-    /// afterwards. In the case where no packed structs are present, it's just a single emptyness
-    /// check of a set instead of heavily influencing all memory access code as other solutions
-    /// would. This is simpler than the alternative of passing a "packed" parameter to every
-    /// load/store method.
-    ///
-    /// One disadvantage of this solution is the fact that you can cast a pointer to a packed
-    /// struct to a pointer to a normal struct and if you access a field of both in the same MIR
-    /// statement, the normal struct access will succeed even though it shouldn't. But even with
-    /// mir optimizations, that situation is hard/impossible to produce.
-    packed: BTreeSet<Entry>,
-
     /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
     /// allocations for string and bytestring literals.
     literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
@@ -147,6 +134,11 @@ pub struct Memory<'a, 'tcx> {

     /// The Key to use for the next thread-local allocation.
     next_thread_local: TlsKey,
+
+    /// To avoid having to pass flags to every single memory access, we have some global state saying whether
+    /// alignment checking is currently enforced for read and/or write accesses.
+    reads_are_aligned: bool,
+    writes_are_aligned: bool,
 }

 impl<'a, 'tcx> Memory<'a, 'tcx> {
@@ -159,11 +151,12 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
             layout,
             memory_size: max_memory,
             memory_usage: 0,
-            packed: BTreeSet::new(),
             static_alloc: HashSet::new(),
             literal_alloc_cache: HashMap::new(),
             thread_local: BTreeMap::new(),
             next_thread_local: 0,
+            reads_are_aligned: true,
+            writes_are_aligned: true,
         }
     }

@@ -278,30 +271,10 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
         self.layout.endian
     }

-    pub fn check_align(&self, ptr: Pointer, align: u64, len: u64) -> EvalResult<'tcx> {
+    pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
         let offset = match ptr.into_inner_primval() {
             PrimVal::Ptr(ptr) => {
                 let alloc = self.get(ptr.alloc_id)?;
-                // check whether the memory was marked as packed
-                // we select all elements that have the correct alloc_id and are within
-                // the range given by the offset into the allocation and the length
-                let start = Entry {
-                    alloc_id: ptr.alloc_id,
-                    packed_start: 0,
-                    packed_end: ptr.offset + len,
-                };
-                let end = Entry {
-                    alloc_id: ptr.alloc_id,
-                    packed_start: ptr.offset + len,
-                    packed_end: 0,
-                };
-                for &Entry { packed_start, packed_end, .. } in self.packed.range(start..end) {
-                    // if the region we are checking is covered by a region in `packed`
-                    // ignore the actual alignment
-                    if packed_start <= ptr.offset && (ptr.offset + len) <= packed_end {
-                        return Ok(());
-                    }
-                }
                 if alloc.align < align {
                     return Err(EvalError::AlignmentCheckFailed {
                         has: alloc.align,
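
With the packed-region bookkeeping gone, `check_align` reduces to a plain check against the allocation. A rough standalone model of what such a check computes (miri's version returns `EvalError::AlignmentCheckFailed` and also handles integer-valued pointers separately):

// A rough model; the real version distinguishes allocation-backed
// pointers from integer pointers via PrimVal.
fn check_align(alloc_align: u64, required: u64, offset: u64) -> Result<(), String> {
    if alloc_align < required {
        return Err(format!("allocation alignment {} < required {}", alloc_align, required));
    }
    if offset % required != 0 {
        return Err(format!("offset {} is not a multiple of {}", offset, required));
    }
    Ok(())
}

fn main() {
    assert!(check_align(8, 4, 16).is_ok());
    assert!(check_align(8, 4, 2).is_err());  // misaligned offset
    assert!(check_align(2, 8, 0).is_err());  // allocation not aligned enough
}
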
@@ -338,18 +311,6 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
         Ok(())
     }

-    pub(crate) fn mark_packed(&mut self, ptr: MemoryPointer, len: u64) {
-        self.packed.insert(Entry {
-            alloc_id: ptr.alloc_id,
-            packed_start: ptr.offset,
-            packed_end: ptr.offset + len,
-        });
-    }
-
-    pub(crate) fn clear_packed(&mut self) {
-        self.packed.clear();
-    }
-
     pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
         let new_key = self.next_thread_local;
         self.next_thread_local += 1;
@@ -426,20 +387,6 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
     }
 }

-// The derived `Ord` impl sorts first by the first field, then, if the fields are the same
-// by the second field, and if those are the same, too, then by the third field.
-// This is exactly what we need for our purposes, since a range within an allocation
-// will give us all `Entry`s that have that `AllocId`, and whose `packed_start` is <= than
-// the one we're looking for, but not > the end of the range we're checking.
-// At the same time the `packed_end` is irrelevant for the sorting and range searching, but used for the check.
-// This kind of search breaks, if `packed_end < packed_start`, so don't do that!
-#[derive(Eq, PartialEq, Ord, PartialOrd)]
-struct Entry {
-    alloc_id: AllocId,
-    packed_start: u64,
-    packed_end: u64,
-}
-
 /// Allocation accessors
 impl<'a, 'tcx> Memory<'a, 'tcx> {
     pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
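
The removed comment documents the trick being retired here: deriving a lexicographic `Ord` for `Entry` lets a `BTreeSet` range query enumerate exactly the marked regions of one allocation. A self-contained sketch of how that retired lookup worked, with `u64` standing in for `AllocId`:

use std::collections::BTreeSet;

// The derived lexicographic Ord compares alloc_id first, then packed_start,
// then packed_end, which is what makes the range query below work.
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct Entry {
    alloc_id: u64, // stand-in for AllocId
    packed_start: u64,
    packed_end: u64,
}

// True if [offset, offset + len) lies inside some marked packed region.
fn is_packed(packed: &BTreeSet<Entry>, alloc_id: u64, offset: u64, len: u64) -> bool {
    // All entries of this allocation whose packed_start <= offset + len:
    let start = Entry { alloc_id, packed_start: 0, packed_end: offset + len };
    let end = Entry { alloc_id, packed_start: offset + len, packed_end: 0 };
    packed.range(start..end)
        .any(|e| e.packed_start <= offset && offset + len <= e.packed_end)
}

fn main() {
    let mut packed = BTreeSet::new();
    packed.insert(Entry { alloc_id: 1, packed_start: 0, packed_end: 12 });
    assert!(is_packed(&packed, 1, 4, 8));   // fully covered by the marked region
    assert!(!is_packed(&packed, 1, 8, 8));  // extends past the region
    assert!(!is_packed(&packed, 2, 0, 4));  // different allocation
}
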
@@ -576,7 +523,9 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
             return Ok(&[]);
         }
         // FIXME: check alignment for zst memory accesses?
-        self.check_align(ptr.into(), align, size)?;
+        if self.reads_are_aligned {
+            self.check_align(ptr.into(), align)?;
+        }
         self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
         let alloc = self.get(ptr.alloc_id)?;
         assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -590,7 +539,9 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
             return Ok(&mut []);
         }
         // FIXME: check alignment for zst memory accesses?
-        self.check_align(ptr.into(), align, size)?;
+        if self.writes_are_aligned {
+            self.check_align(ptr.into(), align)?;
+        }
         self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
         let alloc = self.get_mut(ptr.alloc_id)?;
         assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -1125,3 +1076,46 @@ fn bit_index(bits: u64) -> (usize, usize) {
     assert_eq!(b as usize as u64, b);
     (a as usize, b as usize)
 }
+
+////////////////////////////////////////////////////////////////////////////////
+// Unaligned accesses
+////////////////////////////////////////////////////////////////////////////////
+
+pub(crate) trait HasMemory<'a, 'tcx> {
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx>;
+
+    // These are not supposed to be overriden.
+    fn read_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+        where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
+    {
+        assert!(self.memory_mut().reads_are_aligned, "Unaligned reads must not be nested");
+        self.memory_mut().reads_are_aligned = aligned;
+        let t = f(self);
+        self.memory_mut().reads_are_aligned = true;
+        t
+    }
+
+    fn write_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+        where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
+    {
+        assert!(self.memory_mut().writes_are_aligned, "Unaligned writes must not be nested");
+        self.memory_mut().writes_are_aligned = aligned;
+        let t = f(self);
+        self.memory_mut().writes_are_aligned = true;
+        t
+    }
+}
+
+impl<'a, 'tcx> HasMemory<'a, 'tcx> for Memory<'a, 'tcx> {
+    #[inline]
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
+        self
+    }
+}
+
+impl<'a, 'tcx> HasMemory<'a, 'tcx> for EvalContext<'a, 'tcx> {
+    #[inline]
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
+        &mut self.memory
+    }
+}
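
These combinators are the replacement mechanism: a call site wraps a single access in a closure, the matching flag is lowered for its duration, and the `assert!` rules out nesting. A toy model of the pattern (simplified types, not miri's real signatures):

// A minimal standalone model of the flag-scoping pattern (not miri's types).
struct Memory {
    reads_are_aligned: bool,
}

impl Memory {
    fn read_maybe_aligned<T>(
        &mut self,
        aligned: bool,
        f: impl FnOnce(&mut Self) -> Result<T, String>,
    ) -> Result<T, String> {
        assert!(self.reads_are_aligned, "Unaligned reads must not be nested");
        self.reads_are_aligned = aligned;
        let t = f(self); // the access runs with the flag possibly lowered
        self.reads_are_aligned = true; // restored even when `f` returns Err
        t
    }

    fn load(&mut self) -> Result<u32, String> {
        if self.reads_are_aligned {
            // a real implementation would call check_align(...) here
        }
        Ok(42)
    }
}

fn main() -> Result<(), String> {
    let mut mem = Memory { reads_are_aligned: true };
    // An access through a `ByRef(ptr, /*aligned*/ false)` value:
    let v = mem.read_maybe_aligned(false, |m| m.load())?;
    assert_eq!(v, 42);
    assert!(mem.reads_are_aligned); // the flag is back to its default
    Ok(())
}
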

@@ -28,8 +28,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

     /// Returns true as long as there are more things to do.
     pub fn step(&mut self) -> EvalResult<'tcx, bool> {
-        // see docs on the `Memory::packed` field for why we do this
-        self.memory.clear_packed();
         self.inc_step_counter_and_check_limit(1)?;
         if self.stack.is_empty() {
             return Ok(false);

@@ -11,10 +11,13 @@ use value::Value;
 impl<'a, 'tcx> EvalContext<'a, 'tcx> {
     pub(crate) fn drop_lvalue(&mut self, lval: Lvalue<'tcx>, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
         trace!("drop_lvalue: {:#?}", lval);
+        // We take the address of the object. This may well be unaligned, which is fine for us here.
+        // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
+        // by rustc.
         let val = match self.force_allocation(lval)? {
-            Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => ptr.to_value_with_vtable(vtable),
-            Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => ptr.to_value_with_len(len),
-            Lvalue::Ptr { ptr, extra: LvalueExtra::None } => ptr.to_value(),
+            Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: _ } => ptr.to_value_with_vtable(vtable),
+            Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: _ } => ptr.to_value_with_len(len),
+            Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: _ } => ptr.to_value(),
             _ => bug!("force_allocation broken"),
         };
         self.drop(val, instance, ty, span)

@@ -8,6 +8,7 @@ use error::{EvalError, EvalResult};
 use eval_context::EvalContext;
 use lvalue::{Lvalue, LvalueExtra};
 use value::{PrimVal, PrimValKind, Value, Pointer};
+use memory::HasMemory;

 impl<'a, 'tcx> EvalContext<'a, 'tcx> {
     pub(super) fn call_intrinsic(
@@ -44,7 +45,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "arith_offset" => {
                let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_ty)?;
            }
@@ -60,8 +61,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            "atomic_load_acq" |
            "volatile_load" => {
                let ty = substs.type_at(0);
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
-               self.write_value(Value::ByRef(ptr), dest, ty)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+               self.write_value(Value::by_ref(ptr), dest, ty)?;
            }

            "atomic_store" |
@@ -69,7 +70,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            "atomic_store_rel" |
            "volatile_store" => {
                let ty = substs.type_at(0);
-               let dest = arg_vals[0].read_ptr(&self.memory)?;
+               let dest = arg_vals[0].into_ptr(&mut self.memory)?;
                self.write_value_to_ptr(arg_vals[1], dest, ty)?;
            }

@@ -79,12 +80,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ty = substs.type_at(0);
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                let change = self.value_to_primval(arg_vals[1], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
-                   Value::ByRef(_) => bug!("just read the value, can't be byref"),
+                   Value::ByRef(..) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
                };
                self.write_primval(dest, old, ty)?;
@@ -93,13 +94,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ty = substs.type_at(0);
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                let expect_old = self.value_to_primval(arg_vals[1], ty)?;
                let change = self.value_to_primval(arg_vals[2], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
-                   Value::ByRef(_) => bug!("just read the value, can't be byref"),
+                   Value::ByRef(..) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
                };
                let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
@@ -114,12 +115,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" |
            "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => {
                let ty = substs.type_at(0);
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                let change = self.value_to_primval(arg_vals[1], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
-                   Value::ByRef(_) => bug!("just read the value, can't be byref"),
+                   Value::ByRef(..) => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"),
                };
                self.write_primval(dest, old, ty)?;
@@ -144,8 +145,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
                if elem_size != 0 {
                    let elem_align = self.type_align(elem_ty)?;
-                   let src = arg_vals[0].read_ptr(&self.memory)?;
-                   let dest = arg_vals[1].read_ptr(&self.memory)?;
+                   let src = arg_vals[0].into_ptr(&mut self.memory)?;
+                   let dest = arg_vals[1].into_ptr(&mut self.memory)?;
                    let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                    self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
                }
@@ -173,7 +174,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "discriminant_value" => {
                let ty = substs.type_at(0);
-               let adt_ptr = arg_vals[0].read_ptr(&self.memory)?.to_ptr()?;
+               let adt_ptr = arg_vals[0].into_ptr(&mut self.memory)?.to_ptr()?;
                let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
                self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
            }
@@ -248,9 +249,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
                let init = |this: &mut Self, val: Value| {
                    let zero_val = match val {
-                       Value::ByRef(ptr) => {
+                       Value::ByRef(ptr, aligned) => {
+                           // These writes have no alignment restriction anyway.
                            this.memory.write_repeat(ptr, 0, size)?;
-                           Value::ByRef(ptr)
+                           Value::ByRef(ptr, aligned)
                        },
                        // TODO(solson): Revisit this, it's fishy to check for Undef here.
                        Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
@@ -259,7 +261,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                            let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?;
                            let ptr = Pointer::from(PrimVal::Ptr(ptr));
                            this.memory.write_repeat(ptr, 0, size)?;
-                           Value::ByRef(ptr)
+                           Value::by_ref(ptr)
                        }
                    },
                    Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)),
@@ -270,8 +272,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                };
                match dest {
                    Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
-                   Lvalue::Ptr { ptr, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?,
-                   Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat ptr target"),
+                   Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?,
+                   Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"),
                    Lvalue::Global(cid) => self.modify_global(cid, init)?,
                }
            }
@@ -293,7 +295,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "move_val_init" => {
                let ty = substs.type_at(0);
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
            }

@@ -306,7 +308,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "offset" => {
                let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_ty)?;
            }
@@ -394,11 +396,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "transmute" => {
                let src_ty = substs.type_at(0);
-               let dest_ty = substs.type_at(1);
-               let size = self.type_size(dest_ty)?.expect("transmute() type must be sized");
                let ptr = self.force_allocation(dest)?.to_ptr()?;
-               self.memory.mark_packed(ptr, size);
-               self.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)?;
+               self.write_maybe_aligned(/*aligned*/false, |ectx| {
+                   ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)
+               })?;
            }

            "unchecked_shl" => {
@@ -439,18 +440,18 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                let size = dest_layout.size(&self.tcx.data_layout).bytes();
                let uninit = |this: &mut Self, val: Value| {
                    match val {
-                       Value::ByRef(ptr) => {
+                       Value::ByRef(ptr, aligned) => {
                            this.memory.mark_definedness(ptr, size, false)?;
-                           Ok(Value::ByRef(ptr))
+                           Ok(Value::ByRef(ptr, aligned))
                        },
                        _ => Ok(Value::ByVal(PrimVal::Undef)),
                    }
                };
                match dest {
                    Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
-                   Lvalue::Ptr { ptr, extra: LvalueExtra::None } =>
+                   Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } =>
                        self.memory.mark_definedness(ptr, size, false)?,
-                   Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat ptr target"),
+                   Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"),
                    Lvalue::Global(cid) => self.modify_global(cid, uninit)?,
                }
            }
@@ -461,11 +462,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                let ty_align = self.type_align(ty)?;
                let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
                let size = self.type_size(ty)?.expect("write_bytes() type must be sized");
-               let ptr = arg_vals[0].read_ptr(&self.memory)?;
+               let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                if count > 0 {
                    // TODO: Should we, at least, validate the alignment? (Also see memory::copy)
-                   self.memory.check_align(ptr, ty_align, size * count)?;
+                   self.memory.check_align(ptr, ty_align)?;
                    self.memory.write_repeat(ptr, val_byte, size * count)?;
                }
            }
@@ -482,7 +483,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
    }

    pub fn size_and_align_of_dst(
-       &self,
+       &mut self,
        ty: ty::Ty<'tcx>,
        value: Value,
    ) -> EvalResult<'tcx, (u64, u64)> {
@@ -546,7 +547,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                Ok((size, align.abi()))
            }
            ty::TyDynamic(..) => {
-               let (_, vtable) = value.expect_ptr_vtable_pair(&self.memory)?;
+               let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
                // the second entry in the vtable is the dynamic size of the object.
                self.read_size_and_align_from_vtable(vtable)
            }
@@ -554,7 +555,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            ty::TySlice(_) | ty::TyStr => {
                let elem_ty = ty.sequence_element_type(self.tcx);
                let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64;
-               let (_, len) = value.expect_slice(&self.memory)?;
+               let (_, len) = value.into_slice(&mut self.memory)?;
                let align = self.type_align(elem_ty)?;
                Ok((len * elem_size, align as u64))
            }

@@ -312,9 +312,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
        if self.frame().mir.args_iter().count() == fields.len() + 1 {
            let offsets = variant.offsets.iter().map(|s| s.bytes());
            match arg_val {
-               Value::ByRef(ptr) => {
+               Value::ByRef(ptr, aligned) => {
+                   assert!(aligned, "Unaligned ByRef-values cannot occur as function arguments");
                    for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) {
-                       let arg = Value::ByRef(ptr.offset(offset, self.memory.layout)?);
+                       let arg = Value::ByRef(ptr.offset(offset, self.memory.layout)?, true);
                        let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
                        trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty);
                        self.write_value(arg, dest, ty)?;
@@ -395,7 +396,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            },
            ty::InstanceDef::Virtual(_, idx) => {
                let ptr_size = self.memory.pointer_size();
-               let (_, vtable) = self.eval_operand(&arg_operands[0])?.expect_ptr_vtable_pair(&self.memory)?;
+               let (_, vtable) = self.eval_operand(&arg_operands[0])?.into_ptr_vtable_pair(&mut self.memory)?;
                let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3), self.memory.layout)?)?;
                let instance = self.memory.get_fn(fn_ptr.to_ptr()?)?;
                let mut arg_operands = arg_operands.to_vec();
@@ -574,7 +575,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
            }
            "alloc::heap::::__rust_dealloc" => {
-               let ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
+               let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
                let old_size = self.value_to_primval(args[1], usize)?.to_u64()?;
                let align = self.value_to_primval(args[2], usize)?.to_u64()?;
                if old_size == 0 {
@@ -586,7 +587,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                self.memory.deallocate(ptr, Some((old_size, align)))?;
            }
            "alloc::heap::::__rust_realloc" => {
-               let ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
+               let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
                let old_size = self.value_to_primval(args[1], usize)?.to_u64()?;
                let old_align = self.value_to_primval(args[2], usize)?.to_u64()?;
                let new_size = self.value_to_primval(args[3], usize)?.to_u64()?;
@@ -662,7 +663,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            }

            "free" => {
-               let ptr = args[0].read_ptr(&self.memory)?;
+               let ptr = args[0].into_ptr(&mut self.memory)?;
                if !ptr.is_null()? {
                    self.memory.deallocate(ptr.to_ptr()?, None)?;
                }
@@ -676,8 +677,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            }

            "dlsym" => {
-               let _handle = args[0].read_ptr(&self.memory)?;
-               let symbol = args[1].read_ptr(&self.memory)?.to_ptr()?;
+               let _handle = args[0].into_ptr(&mut self.memory)?;
+               let symbol = args[1].into_ptr(&mut self.memory)?.to_ptr()?;
                let symbol_name = self.memory.read_c_str(symbol)?;
                let err = format!("bad c unicode symbol: {:?}", symbol_name);
                let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
@@ -688,8 +689,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                // fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
                // We abort on panic, so not much is going on here, but we still have to call the closure
                let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
-               let f = args[0].read_ptr(&self.memory)?.to_ptr()?;
-               let data = args[1].read_ptr(&self.memory)?;
+               let f = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
+               let data = args[1].into_ptr(&mut self.memory)?;
                let f_instance = self.memory.get_fn(f)?;
                self.write_null(dest, dest_ty)?;

@@ -720,8 +721,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            }

            "memcmp" => {
-               let left = args[0].read_ptr(&self.memory)?;
-               let right = args[1].read_ptr(&self.memory)?;
+               let left = args[0].into_ptr(&mut self.memory)?;
+               let right = args[1].into_ptr(&mut self.memory)?;
                let n = self.value_to_primval(args[2], usize)?.to_u64()?;

                let result = {
@@ -740,7 +741,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            }

            "memrchr" => {
-               let ptr = args[0].read_ptr(&self.memory)?;
+               let ptr = args[0].into_ptr(&mut self.memory)?;
                let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8;
                let num = self.value_to_primval(args[2], usize)?.to_u64()?;
                if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position(|&c| c == val) {
@@ -752,7 +753,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            }

            "memchr" => {
-               let ptr = args[0].read_ptr(&self.memory)?;
+               let ptr = args[0].into_ptr(&mut self.memory)?;
                let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8;
                let num = self.value_to_primval(args[2], usize)?.to_u64()?;
                if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position(|&c| c == val) {
@@ -765,7 +766,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "getenv" => {
                let result = {
-                   let name_ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
+                   let name_ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
                    let name = self.memory.read_c_str(name_ptr)?;
                    match self.env_vars.get(name) {
                        Some(&var) => PrimVal::Ptr(var),
@@ -778,7 +779,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            "unsetenv" => {
                let mut success = None;
                {
-                   let name_ptr = args[0].read_ptr(&self.memory)?;
+                   let name_ptr = args[0].into_ptr(&mut self.memory)?;
                    if !name_ptr.is_null()? {
                        let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
                        if !name.is_empty() && !name.contains(&b'=') {
@@ -799,8 +800,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            "setenv" => {
                let mut new = None;
                {
-                   let name_ptr = args[0].read_ptr(&self.memory)?;
-                   let value_ptr = args[1].read_ptr(&self.memory)?.to_ptr()?;
+                   let name_ptr = args[0].into_ptr(&mut self.memory)?;
+                   let value_ptr = args[1].into_ptr(&mut self.memory)?.to_ptr()?;
                    let value = self.memory.read_c_str(value_ptr)?;
                    if !name_ptr.is_null()? {
                        let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
@@ -825,7 +826,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            "write" => {
                let fd = self.value_to_primval(args[0], usize)?.to_u64()?;
-               let buf = args[1].read_ptr(&self.memory)?;
+               let buf = args[1].into_ptr(&mut self.memory)?;
                let n = self.value_to_primval(args[2], usize)?.to_u64()?;
                trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
                let result = if fd == 1 || fd == 2 { // stdout/stderr
@@ -842,7 +843,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            }

            "strlen" => {
-               let ptr = args[0].read_ptr(&self.memory)?.to_ptr()?;
+               let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
                let n = self.memory.read_c_str(ptr)?.len();
                self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?;
            }
@@ -885,10 +886,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

            // Hook pthread calls that go to the thread-local storage memory subsystem
            "pthread_key_create" => {
-               let key_ptr = args[0].read_ptr(&self.memory)?;
+               let key_ptr = args[0].into_ptr(&mut self.memory)?;

                // Extract the function type out of the signature (that seems easier than constructing it ourselves...)
-               let dtor = match args[1].read_ptr(&self.memory)?.into_inner_primval() {
+               let dtor = match args[1].into_ptr(&mut self.memory)?.into_inner_primval() {
                    PrimVal::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?),
                    PrimVal::Bytes(0) => None,
                    PrimVal::Bytes(_) => return Err(EvalError::ReadBytesAsPointer),
@@ -930,7 +931,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
            "pthread_setspecific" => {
                // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t
                let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey;
-               let new_ptr = args[1].read_ptr(&self.memory)?;
+               let new_ptr = args[1].into_ptr(&mut self.memory)?;
                self.memory.store_tls(key, new_ptr)?;

                // Return success (0)

src/value.rs

@@ -5,7 +5,7 @@ use std::mem::transmute;
 use rustc::ty::layout::TargetDataLayout;

 use error::{EvalError, EvalResult};
-use memory::{Memory, MemoryPointer};
+use memory::{Memory, MemoryPointer, HasMemory};

 pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
     unsafe { transmute::<u32, f32>(bytes as u32) }
@@ -26,14 +26,15 @@ pub(super) fn f64_to_bytes(f: f64) -> u128 {
 /// A `Value` represents a single self-contained Rust value.
 ///
 /// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitve
-/// value held directly, outside of any allocation (`ByVal`).
+/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember
+/// whether the pointer is supposed to be aligned or not (also see Lvalue).
 ///
 /// For optimization of a few very common cases, there is also a representation for a pair of
 /// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
 /// operations and fat pointers. This idea was taken from rustc's trans.
 #[derive(Clone, Copy, Debug)]
 pub enum Value {
-    ByRef(Pointer),
+    ByRef(Pointer, bool),
     ByVal(PrimVal),
     ByValPair(PrimVal, PrimVal),
 }
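
Since `ByRef` now carries an alignment flag everywhere, a `by_ref` constructor is introduced further down in this file so the common aligned case stays terse. A toy illustration of the resulting API shape (types simplified to keep it self-contained):

#[derive(Clone, Copy, Debug)]
struct Pointer(u64); // stand-in for miri's Pointer

#[derive(Clone, Copy, Debug)]
enum Value {
    ByRef(Pointer, bool), // the bool records whether the pointer is supposed to be aligned
}

impl Value {
    // Mirrors the new `by_ref` helper: aligned is the overwhelmingly common case.
    fn by_ref(ptr: Pointer) -> Self {
        Value::ByRef(ptr, true)
    }
}

fn main() {
    let ordinary = Value::by_ref(Pointer(0x1000));
    let packed_field = Value::ByRef(Pointer(0x1001), false); // e.g. inside #[repr(packed)]
    for v in &[ordinary, packed_field] {
        match *v {
            Value::ByRef(p, aligned) => println!("0x{:x} aligned={}", p.0, aligned),
        }
    }
}
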
@@ -158,24 +159,35 @@ pub enum PrimValKind {
 }

 impl<'a, 'tcx: 'a> Value {
-    pub(super) fn read_ptr(&self, mem: &Memory<'a, 'tcx>) -> EvalResult<'tcx, Pointer> {
+    #[inline]
+    pub(super) fn by_ref(ptr: Pointer) -> Self {
+        Value::ByRef(ptr, true)
+    }
+
+    /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
+    /// this may have to perform a load.
+    pub(super) fn into_ptr(&self, mem: &mut Memory<'a, 'tcx>) -> EvalResult<'tcx, Pointer> {
         use self::Value::*;
         match *self {
-            ByRef(ptr) => mem.read_ptr(ptr.to_ptr()?),
+            ByRef(ptr, aligned) => {
+                mem.read_maybe_aligned(aligned, |mem| mem.read_ptr(ptr.to_ptr()?) )
+            },
             ByVal(ptr) | ByValPair(ptr, _) => Ok(ptr.into()),
         }
     }

-    pub(super) fn expect_ptr_vtable_pair(
+    pub(super) fn into_ptr_vtable_pair(
         &self,
-        mem: &Memory<'a, 'tcx>
+        mem: &mut Memory<'a, 'tcx>
     ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
         use self::Value::*;
         match *self {
-            ByRef(ref_ptr) => {
-                let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
-                let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
-                Ok((ptr, vtable.to_ptr()?))
+            ByRef(ref_ptr, aligned) => {
+                mem.read_maybe_aligned(aligned, |mem| {
+                    let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
+                    let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
+                    Ok((ptr, vtable.to_ptr()?))
+                })
             }

             ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
@@ -184,20 +196,22 @@ impl<'a, 'tcx: 'a> Value {
         }
     }

-    pub(super) fn expect_slice(&self, mem: &Memory<'a, 'tcx>) -> EvalResult<'tcx, (Pointer, u64)> {
+    pub(super) fn into_slice(&self, mem: &mut Memory<'a, 'tcx>) -> EvalResult<'tcx, (Pointer, u64)> {
         use self::Value::*;
         match *self {
-            ByRef(ref_ptr) => {
-                let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
-                let len = mem.read_usize(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
-                Ok((ptr, len))
+            ByRef(ref_ptr, aligned) => {
+                mem.write_maybe_aligned(aligned, |mem| {
+                    let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
+                    let len = mem.read_usize(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
+                    Ok((ptr, len))
+                })
             },
             ByValPair(ptr, val) => {
                 let len = val.to_u128()?;
                 assert_eq!(len as u64 as u128, len);
                 Ok((ptr.into(), len as u64))
             },
-            ByVal(_) => unimplemented!(),
+            ByVal(_) => bug!("expected ptr and length, got {:?}", self),
         }
     }
 }

@@ -1,11 +1,50 @@
 #![allow(dead_code)]
+#![feature(unsize, coerce_unsized)]

 #[repr(packed)]
 struct S {
     a: i32,
     b: i64,
 }

+#[repr(packed)]
+struct Test1<'a> {
+    x: u8,
+    other: &'a u32,
+}
+
+#[repr(packed)]
+struct Test2<'a> {
+    x: u8,
+    other: &'a Test1<'a>,
+}
+
+fn test(t: Test2) {
+    let x = *t.other.other;
+    assert_eq!(x, 42);
+}
+
+fn test_unsizing() {
+    #[repr(packed)]
+    struct UnalignedPtr<'a, T: ?Sized>
+        where T: 'a,
+    {
+        data: &'a T,
+    }
+
+    impl<'a, T, U> std::ops::CoerceUnsized<UnalignedPtr<'a, U>> for UnalignedPtr<'a, T>
+        where
+        T: std::marker::Unsize<U> + ?Sized,
+        U: ?Sized,
+    { }
+
+    let arr = [1, 2, 3];
+    let arr_unaligned: UnalignedPtr<[i32; 3]> = UnalignedPtr { data: &arr };
+    let _uns: UnalignedPtr<[i32]> = arr_unaligned;
+}
+
 fn main() {
-    let x = S {
+    let mut x = S {
         a: 42,
         b: 99,
     };
@@ -16,4 +55,11 @@ fn main() {
     // can't do `assert_eq!(x.a, 42)`, because `assert_eq!` takes a reference
     assert_eq!({x.a}, 42);
     assert_eq!({x.b}, 99);
+
+    x.b = 77;
+    assert_eq!({x.b}, 77);
+
+    test(Test2 { x: 0, other: &Test1 { x: 0, other: &42 }});
+
+    test_unsizing();
 }