Merge pull request #336 from RalfJung/mir-validate

Validation: Identify write locks using an abstract lvalue
Ralf Jung, 2017-09-13 18:32:39 +02:00 (committed by GitHub)
Commit 237590ada3
14 changed files with 427 additions and 303 deletions

View File

@ -618,7 +618,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
for (field_index, operand) in operands.iter().enumerate() {
let value = self.eval_operand(operand)?;
let field_dest = self.lvalue_field(dest, field_index, dest_ty, value.ty)?;
let field_dest = self.lvalue_field(dest, mir::Field::new(field_index), dest_ty, value.ty)?;
self.write_value(value, field_dest)?;
}
Ok(())
@ -1466,7 +1466,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
/// ensures this Value is not a ByRef
pub(super) fn follow_by_ref_value(
&mut self,
&self,
value: Value,
ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
@ -1479,7 +1479,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
pub fn value_to_primval(
&mut self,
&self,
ValTy { value, ty } : ValTy<'tcx>,
) -> EvalResult<'tcx, PrimVal> {
match self.follow_by_ref_value(value, ty)? {

View File

@ -218,12 +218,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn lvalue_field(
&mut self,
base: Lvalue,
field_index: usize,
field: mir::Field,
base_ty: Ty<'tcx>,
field_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Lvalue> {
let base_layout = self.type_layout(base_ty)?;
use rustc::ty::layout::Layout::*;
let base_layout = self.type_layout(base_ty)?;
let field_index = field.index();
let (offset, packed) = match *base_layout {
Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed),
@ -405,7 +407,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
use rustc::mir::ProjectionElem::*;
let (ptr, extra) = match *proj_elem {
Field(field, field_ty) => {
return self.lvalue_field(base, field.index(), base_ty, field_ty);
return self.lvalue_field(base, field, base_ty, field_ty);
}
Downcast(_, variant) => {

View File

@ -9,7 +9,7 @@ use syntax::ast::Mutability;
use rustc::middle::region;
use super::{EvalResult, EvalErrorKind, PrimVal, Pointer, EvalContext, DynamicLifetime, Machine,
RangeMap};
RangeMap, AbsLvalue};
////////////////////////////////////////////////////////////////////////////////
// Locks
@ -23,14 +23,29 @@ pub enum AccessKind {
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
struct LockInfo {
struct LockInfo<'tcx> {
/// Stores for which lifetimes (of the original write lock) we got
/// which suspensions.
suspended: HashMap<DynamicLifetime, Vec<region::Scope>>,
suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
/// The current state of the lock that's actually effective.
active: Lock,
}
/// Write locks are identified by a stack frame and an "abstract" (untyped) lvalue.
/// It may be tempting to use the lifetime as identifier, but that does not work
/// for two reasons:
/// * First of all, due to subtyping, the same lock may be referred to with different
/// lifetimes.
/// * Secondly, different write locks may actually have the same lifetime. See `test2`
/// in `run-pass/many_shr_bor.rs`.
/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
/// considers the path frozen and hence the Id remains stable.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct WriteLockId<'tcx> {
frame: usize,
path: AbsLvalue<'tcx>,
}
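
A quick way to see why (frame, path) works as a map key where a lifetime does not: identity is purely structural, so two locks suspended in the same frame for the same lexical region still compare unequal as long as their paths differ. A minimal self-contained sketch with stand-in types (usize for the frame index, &'static str for the abstract path; not miri's actual definitions):

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Id { frame: usize, path: &'static str }

fn main() {
    let mut suspended: HashMap<Id, Vec<&'static str>> = HashMap::new();
    // The `test2` situation: outer and inner write lock live in the same
    // frame and are suspended for the same region, yet stay distinct keys.
    suspended.entry(Id { frame: 0, path: "*r" }).or_default().push("'s");
    suspended.entry(Id { frame: 0, path: "*x_ref" }).or_default().push("'s");
    assert_eq!(suspended.len(), 2);
}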
#[derive(Clone, Debug, PartialEq)]
pub enum Lock {
NoLock,
@ -39,14 +54,14 @@ pub enum Lock {
}
use self::Lock::*;
impl Default for LockInfo {
impl<'tcx> Default for LockInfo<'tcx> {
fn default() -> Self {
LockInfo::new(NoLock)
}
}
impl LockInfo {
fn new(lock: Lock) -> LockInfo {
impl<'tcx> LockInfo<'tcx> {
fn new(lock: Lock) -> LockInfo<'tcx> {
LockInfo {
suspended: HashMap::new(),
active: lock,
@ -128,7 +143,7 @@ impl fmt::Debug for AllocId {
}
#[derive(Debug)]
pub struct Allocation<M> {
pub struct Allocation<'tcx, M> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer
pub bytes: Vec<u8>,
@ -146,17 +161,17 @@ pub struct Allocation<M> {
/// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
pub kind: MemoryKind<M>,
/// Memory regions that are locked by some function
locks: RangeMap<LockInfo>,
locks: RangeMap<LockInfo<'tcx>>,
}
impl<M> Allocation<M> {
fn check_locks<'tcx>(
impl<'tcx, M> Allocation<'tcx, M> {
fn check_locks(
&self,
frame: Option<usize>,
offset: u64,
len: u64,
access: AccessKind,
) -> Result<(), LockInfo> {
) -> Result<(), LockInfo<'tcx>> {
if len == 0 {
return Ok(());
}
@ -237,7 +252,7 @@ pub struct Memory<'a, 'tcx, M: Machine<'tcx>> {
pub data: M::MemoryData,
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
alloc_map: HashMap<u64, Allocation<M::MemoryKinds>>,
alloc_map: HashMap<u64, Allocation<'tcx, M::MemoryKinds>>,
/// The AllocId to assign to the next new regular allocation. Always incremented, never gets smaller.
next_alloc_id: u64,
@ -610,62 +625,72 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
/// Release or suspend a write lock of the given lifetime prematurely.
/// When releasing, if there is a read lock or someone else's write lock, that's an error.
/// We *do* accept releasing a NoLock, as this can happen when a local is first acquired and later force_allocate'd.
/// If no lock is held, that's fine. This can happen when e.g. a local is initialized
/// from a constant, and then suspended.
/// When suspending, the same cases are fine; we just register an additional suspension.
pub(crate) fn suspend_write_lock(
&mut self,
ptr: MemoryPointer,
len: u64,
lock_region: Option<region::Scope>,
lock_path: &AbsLvalue<'tcx>,
suspend: Option<region::Scope>,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let lock_lft = DynamicLifetime {
frame: cur_frame,
region: lock_region,
};
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) {
let is_our_lock = match lock.active {
WriteLock(lft) => lft == lock_lft,
WriteLock(lft) =>
// Double-check that we are holding the lock.
// (Due to subtyping, checking the region would not make any sense.)
lft.frame == cur_frame,
ReadLock(_) | NoLock => false,
};
if is_our_lock {
trace!("Releasing {:?} at {:?}", lock.active, lock_lft);
trace!("Releasing {:?}", lock.active);
// Disable the lock
lock.active = NoLock;
} else {
trace!(
"Not touching {:?} at {:?} as its not our lock",
"Not touching {:?} as it is not our lock",
lock.active,
lock_lft
);
}
match suspend {
Some(suspend_region) => {
trace!("Adding suspension to {:?} at {:?}", lock.active, lock_lft);
// We just released this lock, so add a new suspension.
// FIXME: Really, if there ever already is a suspension when is_our_lock, or if there is no suspension when !is_our_lock, something is amiss.
// But this model is not good enough yet to prevent that.
lock.suspended
.entry(lock_lft)
.or_insert_with(|| Vec::new())
.push(suspend_region);
// Check if we want to register a suspension
if let Some(suspend_region) = suspend {
let lock_id = WriteLockId {
frame: cur_frame,
path: lock_path.clone(),
};
trace!("Adding suspension to {:?}", lock_id);
let mut new_suspension = false;
lock.suspended
.entry(lock_id)
// Remember whether we added a new suspension or not
.or_insert_with(|| { new_suspension = true; Vec::new() })
.push(suspend_region);
// If the suspension is new, we should have owned this.
// If there already was a suspension, we should NOT have owned this.
if new_suspension == is_our_lock {
// All is well
continue 'locks;
}
None => {
// Make sure we did not try to release someone else's lock.
if !is_our_lock && lock.active != NoLock {
return err!(InvalidMemoryLockRelease {
ptr,
len,
frame: cur_frame,
lock: lock.active.clone(),
});
}
} else {
if !is_our_lock {
// All is well.
continue 'locks;
}
}
// If we get here, releasing this is an error except for NoLock.
if lock.active != NoLock {
return err!(InvalidMemoryLockRelease {
ptr,
len,
frame: cur_frame,
lock: lock.active.clone(),
});
}
}
Ok(())
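
The `new_suspension == is_our_lock` check above encodes a simple invariant: the first suspension registered under an id must come from the lock's current owner (who just released it), and every later suspension must come from someone who does not hold the lock. A simplified self-contained model of that bookkeeping, with toy types standing in for WriteLockId and region::Scope (the real code additionally tolerates releasing a NoLock):

use std::collections::HashMap;

type Id = (usize, &'static str); // (frame, abstract path)
type Region = &'static str;     // stand-in for region::Scope

fn suspend(
    suspended: &mut HashMap<Id, Vec<Region>>,
    id: Id,
    region: Region,
    is_our_lock: bool,
) -> Result<(), &'static str> {
    let mut new_suspension = false;
    suspended
        .entry(id)
        // Record whether this id is being suspended for the first time.
        .or_insert_with(|| { new_suspension = true; Vec::new() })
        .push(region);
    // First suspension <=> we held the lock; any mismatch means the lock
    // state and the suspension table disagree.
    if new_suspension == is_our_lock {
        Ok(())
    } else {
        Err("suspension does not match lock ownership")
    }
}

fn main() {
    let mut s = HashMap::new();
    assert!(suspend(&mut s, (0, "*r"), "'a", true).is_ok());   // owner suspends
    assert!(suspend(&mut s, (0, "*r"), "'b", false).is_ok());  // later shared borrow
    assert!(suspend(&mut s, (0, "*x"), "'c", false).is_err()); // nobody holds *x
}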
@ -676,26 +701,27 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self,
ptr: MemoryPointer,
len: u64,
lock_path: &AbsLvalue<'tcx>,
lock_region: Option<region::Scope>,
suspended_region: region::Scope,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let lock_lft = DynamicLifetime {
let lock_id = WriteLockId {
frame: cur_frame,
region: lock_region,
path: lock_path.clone(),
};
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
for lock in alloc.locks.iter_mut(ptr.offset, len) {
// Check if we have a suspension here
let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_lft) {
let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
None => {
trace!("No suspension around, we can just acquire");
(true, false)
}
Some(suspensions) => {
trace!("Found suspension of {:?}, removing it", lock_lft);
trace!("Found suspension of {:?}, removing it", lock_id);
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
@ -715,12 +741,17 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
if remove_suspension {
// with NLL, we could do that up in the match above...
assert!(got_the_lock);
lock.suspended.remove(&lock_lft);
lock.suspended.remove(&lock_id);
}
if got_the_lock {
match lock.active {
ref mut active @ NoLock => {
*active = WriteLock(lock_lft);
*active = WriteLock(
DynamicLifetime {
frame: cur_frame,
region: lock_region,
}
);
}
_ => {
return err!(MemoryAcquireConflict {
@ -770,8 +801,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
if lock_ended {
lock.active = NoLock;
}
// Also clean up suspended write locks
lock.suspended.retain(|lft, _suspensions| !has_ended(lft));
// Also clean up suspended write locks when the function returns
if ending_region.is_none() {
lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
}
}
// Clean up the map
alloc.locks.retain(|lock| match lock.active {
@ -784,7 +817,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
/// Allocation accessors
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::MemoryKinds>> {
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<'tcx, M::MemoryKinds>> {
match id.into_alloc_id_kind() {
AllocIdKind::Function(_) => err!(DerefFunctionPointer),
AllocIdKind::Runtime(id) => {
@ -799,7 +832,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
fn get_mut_unchecked(
&mut self,
id: AllocId,
) -> EvalResult<'tcx, &mut Allocation<M::MemoryKinds>> {
) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
match id.into_alloc_id_kind() {
AllocIdKind::Function(_) => err!(DerefFunctionPointer),
AllocIdKind::Runtime(id) => {
@ -811,7 +844,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
}
}
fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<M::MemoryKinds>> {
fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
let alloc = self.get_mut_unchecked(id)?;
if alloc.mutable == Mutability::Mutable {
Ok(alloc)

View File

@ -39,4 +39,4 @@ pub use self::const_eval::{eval_body_as_integer, eval_body_as_primval};
pub use self::machine::Machine;
pub use self::validation::ValidationQuery;
pub use self::validation::{ValidationQuery, AbsLvalue};

View File

@ -1,4 +1,4 @@
use rustc::hir::Mutability;
use rustc::hir::{self, Mutability};
use rustc::hir::Mutability::*;
use rustc::mir::{self, ValidationOp, ValidationOperand};
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
@ -7,11 +7,12 @@ use rustc::traits;
use rustc::infer::InferCtxt;
use rustc::traits::Reveal;
use rustc::middle::region;
use rustc_data_structures::indexed_vec::Idx;
use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value,
Lvalue, LvalueExtra, Machine};
Lvalue, LvalueExtra, Machine, ValTy};
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue>;
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsLvalue<'tcx>, Lvalue)>;
#[derive(Copy, Clone, Debug, PartialEq)]
enum ValidationMode {
@ -31,8 +32,77 @@ impl ValidationMode {
}
}
// Validity checks
// Abstract lvalues
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum AbsLvalue<'tcx> {
Local(mir::Local),
Static(hir::def_id::DefId),
Projection(Box<AbsLvalueProjection<'tcx>>),
}
type AbsLvalueProjection<'tcx> = mir::Projection<'tcx, AbsLvalue<'tcx>, u64, ()>;
type AbsLvalueElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
impl<'tcx> AbsLvalue<'tcx> {
pub fn field(self, f: mir::Field) -> AbsLvalue<'tcx> {
self.elem(mir::ProjectionElem::Field(f, ()))
}
pub fn deref(self) -> AbsLvalue<'tcx> {
self.elem(mir::ProjectionElem::Deref)
}
pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsLvalue<'tcx> {
self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
}
pub fn index(self, index: u64) -> AbsLvalue<'tcx> {
self.elem(mir::ProjectionElem::Index(index))
}
fn elem(self, elem: AbsLvalueElem<'tcx>) -> AbsLvalue<'tcx> {
AbsLvalue::Projection(Box::new(AbsLvalueProjection {
base: self,
elem,
}))
}
}
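
Since path identity is purely structural (derive(PartialEq, Eq, Hash)), a path rebuilt at recovery time matches the one captured at suspension time. Note also that abstract_lvalue_projection below bakes the current runtime value of an Index local into the path; that value is stable precisely because the borrow checker keeps the path frozen while the lock is suspended. A self-contained sketch of the structural-equality idea with simplified types (the real AbsLvalue also has Static bases and ConstantIndex/Subslice/Downcast elements):

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum AbsPath {
    Local(usize),
    Proj(Box<AbsPath>, Elem),
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum Elem { Deref, Field(usize), Index(u64) }

impl AbsPath {
    fn deref(self) -> Self { AbsPath::Proj(Box::new(self), Elem::Deref) }
    fn field(self, f: usize) -> Self { AbsPath::Proj(Box::new(self), Elem::Field(f)) }
    fn index(self, i: u64) -> Self { AbsPath::Proj(Box::new(self), Elem::Index(i)) }
}

fn main() {
    let x = 0usize; // stand-in for a mir::Local
    // The path for `(*x).0[3]`, built twice, compares and hashes equal...
    let a = AbsPath::Local(x).deref().field(0).index(3);
    let b = AbsPath::Local(x).deref().field(0).index(3);
    assert_eq!(a, b);
    // ...while a sibling field of the same base stays a distinct key, even
    // if the borrows it describes share a lifetime with the first one.
    assert_ne!(a, AbsPath::Local(x).deref().field(1).index(3));
}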
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
fn abstract_lvalue_projection(&self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, AbsLvalueProjection<'tcx>> {
use self::mir::ProjectionElem::*;
let elem = match proj.elem {
Deref => Deref,
Field(f, _) => Field(f, ()),
Index(v) => {
let value = self.frame().get_local(v)?;
let ty = self.tcx.types.usize;
let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
Index(n)
},
ConstantIndex { offset, min_length, from_end } =>
ConstantIndex { offset, min_length, from_end },
Subslice { from, to } =>
Subslice { from, to },
Downcast(adt, sz) => Downcast(adt, sz),
};
Ok(AbsLvalueProjection {
base: self.abstract_lvalue(&proj.base)?,
elem
})
}
fn abstract_lvalue(&self, lval: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, AbsLvalue<'tcx>> {
Ok(match lval {
&mir::Lvalue::Local(l) => AbsLvalue::Local(l),
&mir::Lvalue::Static(ref s) => AbsLvalue::Static(s.def_id),
&mir::Lvalue::Projection(ref p) =>
AbsLvalue::Projection(Box::new(self.abstract_lvalue_projection(&*p)?)),
})
}
// Validity checks
pub(crate) fn validation_op(
&mut self,
op: ValidationOp,
@ -56,8 +126,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
use regex::Regex;
lazy_static! {
static ref RE: Regex = Regex::new("^(\
(std|alloc::heap::__core)::mem::uninitialized::|\
(std|alloc::heap::__core)::mem::forget::|\
(std|alloc::heap::__core)::mem::(uninitialized|forget)::|\
<(std|alloc)::heap::Heap as (std::heap|alloc::allocator)::Alloc>::|\
<(std|alloc::heap::__core)::mem::ManuallyDrop<T>><.*>::new$|\
<(std|alloc::heap::__core)::mem::ManuallyDrop<T> as std::ops::DerefMut><.*>::deref_mut$|\
@ -79,8 +148,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// We need to monomorphize ty *without* erasing lifetimes
let ty = operand.ty.subst(self.tcx, self.substs());
let lval = self.eval_lvalue(&operand.lval)?;
let abs_lval = self.abstract_lvalue(&operand.lval)?;
let query = ValidationQuery {
lval,
lval: (abs_lval, lval),
ty,
re: operand.re,
mutbl: operand.mutbl,
@ -264,12 +334,13 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
mode: ValidationMode,
) -> EvalResult<'tcx> {
// TODO: Maybe take visibility/privacy into account.
for (idx, field) in variant.fields.iter().enumerate() {
let field_ty = field.ty(self.tcx, subst);
let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
for (idx, field_def) in variant.fields.iter().enumerate() {
let field_ty = field_def.ty(self.tcx, subst);
let field = mir::Field::new(idx);
let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
self.validate(
ValidationQuery {
lval: field_lvalue,
lval: (query.lval.0.clone().field(field), field_lvalue),
ty: field_ty,
..query
},
@ -282,6 +353,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
fn validate_ptr(
&mut self,
val: Value,
abs_lval: AbsLvalue<'tcx>,
pointee_ty: Ty<'tcx>,
re: Option<region::Scope>,
mutbl: Mutability,
@ -296,7 +368,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
self.validate(
ValidationQuery {
lval: pointee_lvalue,
lval: (abs_lval.deref(), pointee_lvalue),
ty: pointee_ty,
re,
mutbl,
@ -306,23 +378,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
/// Validate the lvalue at the given type. If `acquire` is false, just do a release of all write locks
#[inline]
fn validate(&mut self, query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx> {
match self.try_validate(query, mode) {
// ReleaseUntil(None) of an uninitialized variable is a NOP. This is needed because
// we have to release the return value of a function; due to destination-passing-style
// the callee may directly write there.
// TODO: Ideally we would know whether the destination is already initialized, and only
// release if it is. But of course that can't even always be statically determined.
Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
if mode == ValidationMode::ReleaseUntil(None) => {
return Ok(());
}
res => res,
}
}
fn try_validate(
fn validate(
&mut self,
mut query: ValidationQuery<'tcx>,
mode: ValidationMode,
@ -345,7 +401,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// HACK: For now, bail out if we hit a dead local during recovery (can happen because sometimes we have
// StorageDead before EndRegion due to https://github.com/rust-lang/rust/issues/43481).
// TODO: We should rather fix the MIR.
match query.lval {
match query.lval.1 {
Lvalue::Local { frame, local } => {
let res = self.stack[frame].get_local(local);
match (res, mode) {
@ -380,7 +436,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// Tracking the same state for locals not backed by memory would just duplicate too
// much machinery.
// FIXME: We ignore alignment.
let (ptr, extra) = self.force_allocation(query.lval)?.to_ptr_extra_aligned();
let (ptr, extra) = self.force_allocation(query.lval.1)?.to_ptr_extra_aligned();
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(query.ty)? {
@ -431,6 +487,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
self.memory.recover_write_lock(
ptr,
len,
&query.lval.0,
query.re,
ending_ce,
)?
@ -439,7 +496,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
self.memory.suspend_write_lock(
ptr,
len,
query.re,
&query.lval.0,
suspended_ce,
)?
}
@ -449,206 +506,225 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
}
match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) => {
// TODO: Make sure these are not undef.
// We could do a bounds-check and other sanity checks on the lvalue, but it would be a bug in miri for this to ever fail.
Ok(())
}
TyBool | TyFloat(_) | TyChar | TyStr => {
// TODO: Check if these are valid bool/float/codepoint/UTF-8, respectively (and in particular, not undef).
Ok(())
}
TyNever => err!(ValidationFailure(format!("The empty type is never valid."))),
TyRef(region,
ty::TypeAndMut {
ty: pointee_ty,
mutbl,
}) => {
let val = self.read_lvalue(query.lval)?;
// Sharing restricts our context
if mutbl == MutImmutable {
query.mutbl = MutImmutable;
let res = do catch {
match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) => {
// TODO: Make sure these are not undef.
// We could do a bounds-check and other sanity checks on the lvalue, but it would be a bug in miri for this to ever fail.
Ok(())
}
// Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
// we record the region of this borrow to the context.
if query.re == None {
match *region {
ReScope(scope) => query.re = Some(scope),
// It is possible for us to encounter erased lifetimes here because the lifetimes in
// this function's Substs will be erased.
_ => {}
TyBool | TyFloat(_) | TyChar | TyStr => {
// TODO: Check if these are valid bool/float/codepoint/UTF-8, respectively (and in particular, not undef).
Ok(())
}
TyNever => err!(ValidationFailure(format!("The empty type is never valid."))),
TyRef(region,
ty::TypeAndMut {
ty: pointee_ty,
mutbl,
}) => {
let val = self.read_lvalue(query.lval.1)?;
// Sharing restricts our context
if mutbl == MutImmutable {
query.mutbl = MutImmutable;
}
}
self.validate_ptr(val, pointee_ty, query.re, query.mutbl, mode)
}
TyAdt(adt, _) if adt.is_box() => {
let val = self.read_lvalue(query.lval)?;
self.validate_ptr(val, query.ty.boxed_ty(), query.re, query.mutbl, mode)
}
TyFnPtr(_sig) => {
let ptr = self.read_lvalue(query.lval)?
.into_ptr(&self.memory)?
.to_ptr()?;
self.memory.get_fn(ptr)?;
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
Ok(())
}
TyFnDef(..) => {
// This is a zero-sized type with all relevant data sitting in the type.
// There is nothing to validate.
Ok(())
}
// Compound types
TySlice(elem_ty) => {
let len = match query.lval {
Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
_ => {
bug!(
"acquire_valid of a TySlice given non-slice lvalue: {:?}",
query.lval
)
// Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
// we record the region of this borrow to the context.
if query.re == None {
match *region {
ReScope(scope) => query.re = Some(scope),
// It is possible for us to encounter erased lifetimes here because the lifetimes in
// this function's Substs will be erased.
_ => {}
}
}
};
for i in 0..len {
let inner_lvalue = self.lvalue_index(query.lval, query.ty, i)?;
self.validate(
ValidationQuery {
lval: inner_lvalue,
ty: elem_ty,
..query
},
mode,
)?;
self.validate_ptr(val, query.lval.0, pointee_ty, query.re, query.mutbl, mode)
}
Ok(())
}
TyArray(elem_ty, len) => {
let len = len.val.to_const_int().unwrap().to_u64().unwrap();
for i in 0..len {
let inner_lvalue = self.lvalue_index(query.lval, query.ty, i as u64)?;
self.validate(
ValidationQuery {
lval: inner_lvalue,
ty: elem_ty,
..query
},
mode,
)?;
TyAdt(adt, _) if adt.is_box() => {
let val = self.read_lvalue(query.lval.1)?;
self.validate_ptr(val, query.lval.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)
}
Ok(())
}
TyDynamic(_data, _region) => {
// Check that this is a valid vtable
let vtable = match query.lval {
Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
_ => {
bug!(
"acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}",
query.lval
)
}
};
self.read_size_and_align_from_vtable(vtable)?;
// TODO: Check that the vtable contains all the function pointers we expect it to have.
// Trait objects cannot have any operations performed
// on them directly. We cannot, in general, even acquire any locks as the trait object *could*
// contain an UnsafeCell. If we call functions to get access to data, we will validate
// their return values. So, it doesn't seem like there's anything else to do.
Ok(())
}
TyAdt(adt, subst) => {
if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() &&
query.mutbl == MutImmutable
{
// No locks for shared unsafe cells. Also no other validation, the only field is private anyway.
return Ok(());
TyFnPtr(_sig) => {
let ptr = self.read_lvalue(query.lval.1)?
.into_ptr(&self.memory)?
.to_ptr()?;
self.memory.get_fn(ptr)?;
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
Ok(())
}
TyFnDef(..) => {
// This is a zero-sized type with all relevant data sitting in the type.
// There is nothing to validate.
Ok(())
}
match adt.adt_kind() {
AdtKind::Enum => {
// TODO: Can we get the discriminant without forcing an allocation?
let ptr = self.force_allocation(query.lval)?.to_ptr()?;
let discr = self.read_discriminant_value(ptr, query.ty)?;
// Get variant index for discriminant
let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| {
variant_discr.to_u128_unchecked() == discr
});
let variant_idx = match variant_idx {
Some(val) => val,
None => return err!(InvalidDiscriminant),
};
let variant = &adt.variants[variant_idx];
if variant.fields.len() > 0 {
// Downcast to this variant, if needed
let lval = if adt.variants.len() > 1 {
self.eval_lvalue_projection(
query.lval,
query.ty,
&mir::ProjectionElem::Downcast(adt, variant_idx),
)?
} else {
// Compound types
TySlice(elem_ty) => {
let len = match query.lval.1 {
Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
_ => {
bug!(
"acquire_valid of a TySlice given non-slice lvalue: {:?}",
query.lval
};
// Recursively validate the fields
self.validate_variant(
ValidationQuery { lval, ..query },
variant,
subst,
mode,
)
} else {
// No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
}
};
for i in 0..len {
let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i)?;
self.validate(
ValidationQuery {
lval: (query.lval.0.clone().index(i), inner_lvalue),
ty: elem_ty,
..query
},
mode,
)?;
}
Ok(())
}
TyArray(elem_ty, len) => {
let len = len.val.to_const_int().unwrap().to_u64().unwrap();
for i in 0..len {
let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i as u64)?;
self.validate(
ValidationQuery {
lval: (query.lval.0.clone().index(i as u64), inner_lvalue),
ty: elem_ty,
..query
},
mode,
)?;
}
Ok(())
}
TyDynamic(_data, _region) => {
// Check that this is a valid vtable
let vtable = match query.lval.1 {
Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
_ => {
bug!(
"acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}",
query.lval
)
}
};
self.read_size_and_align_from_vtable(vtable)?;
// TODO: Check that the vtable contains all the function pointers we expect it to have.
// Trait objects cannot have any operations performed
// on them directly. We cannot, in general, even acquire any locks as the trait object *could*
// contain an UnsafeCell. If we call functions to get access to data, we will validate
// their return values. So, it doesn't seem like there's anything else to do.
Ok(())
}
TyAdt(adt, subst) => {
if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() &&
query.mutbl == MutImmutable
{
// No locks for shared unsafe cells. Also no other validation, the only field is private anyway.
return Ok(());
}
match adt.adt_kind() {
AdtKind::Enum => {
// TODO: Can we get the discriminant without forcing an allocation?
let ptr = self.force_allocation(query.lval.1)?.to_ptr()?;
let discr = self.read_discriminant_value(ptr, query.ty)?;
// Get variant index for discriminant
let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| {
variant_discr.to_u128_unchecked() == discr
});
let variant_idx = match variant_idx {
Some(val) => val,
None => return err!(InvalidDiscriminant),
};
let variant = &adt.variants[variant_idx];
if variant.fields.len() > 0 {
// Downcast to this variant, if needed
let lval = if adt.variants.len() > 1 {
(
query.lval.0.downcast(adt, variant_idx),
self.eval_lvalue_projection(
query.lval.1,
query.ty,
&mir::ProjectionElem::Downcast(adt, variant_idx),
)?,
)
} else {
query.lval
};
// Recursively validate the fields
self.validate_variant(
ValidationQuery { lval, ..query },
variant,
subst,
mode,
)
} else {
// No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
Ok(())
}
}
AdtKind::Struct => {
self.validate_variant(query, adt.struct_variant(), subst, mode)
}
AdtKind::Union => {
// No guarantees are provided for union types.
// TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
Ok(())
}
}
AdtKind::Struct => {
self.validate_variant(query, adt.struct_variant(), subst, mode)
}
TyTuple(ref types, _) => {
for (idx, field_ty) in types.iter().enumerate() {
let field = mir::Field::new(idx);
let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
self.validate(
ValidationQuery {
lval: (query.lval.0.clone().field(field), field_lvalue),
ty: field_ty,
..query
},
mode,
)?;
}
AdtKind::Union => {
// No guarantees are provided for union types.
// TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
Ok(())
Ok(())
}
TyClosure(def_id, ref closure_substs) => {
for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
let field = mir::Field::new(idx);
let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
self.validate(
ValidationQuery {
lval: (query.lval.0.clone().field(field), field_lvalue),
ty: field_ty,
..query
},
mode,
)?;
}
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
// Are there other things we can/should check? Like vtable pointers?
Ok(())
}
// FIXME: generators aren't validated right now
TyGenerator(..) => Ok(()),
_ => bug!("We already established that this is a type we support. ({})", query.ty),
}
TyTuple(ref types, _) => {
for (idx, field_ty) in types.iter().enumerate() {
let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
self.validate(
ValidationQuery {
lval: field_lvalue,
ty: field_ty,
..query
},
mode,
)?;
}
Ok(())
};
match res {
// ReleaseUntil(None) of an uninitialized variable is a NOP. This is needed because
// we have to release the return value of a function; due to destination-passing-style
// the callee may directly write there.
// TODO: Ideally we would know whether the destination is already initialized, and only
// release if it is. But of course that can't even always be statically determined.
Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
if mode == ValidationMode::ReleaseUntil(None) => {
return Ok(());
}
TyClosure(def_id, ref closure_substs) => {
for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
self.validate(
ValidationQuery {
lval: field_lvalue,
ty: field_ty,
..query
},
mode,
)?;
}
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
// Are there other things we can/should check? Like vtable pointers?
Ok(())
}
// FIXME: generators aren't validated right now
TyGenerator(..) => Ok(()),
_ => bug!("We already established that this is a type we support. ({})", query.ty),
res => res,
}
}
}
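
The restructuring above merges try_validate back into validate by wrapping the whole body in a do catch block (the unstable catch_expr feature, enabled in the next file), so every `?` in the body funnels into one `res` that is filtered once for the ReadUndefBytes special case. On stable Rust the same shape can be modeled with an immediately invoked closure; a minimal sketch with toy error types, not miri's:

#[derive(Debug, PartialEq)]
enum EvalErr { ReadUndefBytes }

// `releasing` stands in for mode == ValidationMode::ReleaseUntil(None).
fn validate(uninit: bool, releasing: bool) -> Result<(), EvalErr> {
    let res = (|| -> Result<(), EvalErr> {
        if uninit {
            return Err(EvalErr::ReadUndefBytes);
        }
        // ... the type-directed checks would run here ...
        Ok(())
    })();
    match res {
        // Releasing an uninitialized value is a NOP, not an error: with
        // destination passing, the callee may write the return value directly,
        // so it can legitimately still be undefined at release time.
        Err(EvalErr::ReadUndefBytes) if releasing => Ok(()),
        res => res,
    }
}

fn main() {
    assert_eq!(validate(true, true), Ok(()));
    assert_eq!(validate(true, false), Err(EvalErr::ReadUndefBytes));
}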

View File

@ -3,6 +3,7 @@
rustc_private,
conservative_impl_trait,
never_type,
catch_expr,
)]
// From rustc.

View File

@ -1,4 +1,4 @@
// FIXME: Probably failing due to https://github.com/solson/miri/issues/296
// FIXME: Something in panic handling fails validation with full-MIR
// compile-flags: -Zmir-emit-validate=0
//error-pattern: the evaluated program panicked

View File

@ -0,0 +1,24 @@
// Make sure validation can handle many overlapping shared borrows for different parts of a data structure
#![allow(unused_variables)]
use std::cell::RefCell;
fn evil(x: *mut i32) {
unsafe { *x = 0; } //~ ERROR: in conflict with lock WriteLock
}
fn test(r: &mut RefCell<i32>) {
let x = &*r; // releasing write lock, first suspension recorded
let mut x_ref = x.borrow_mut();
let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock
{
let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension
let y = &*r; // second suspension for the outer write lock
let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock
}
// If the two locks are mixed up, here we should have a write lock, but we do not.
evil(x_inner as *mut _);
}
fn main() {
test(&mut RefCell::new(0));
}

View File

@ -1,11 +0,0 @@
// FIXME: Probably failing due to https://github.com/solson/miri/issues/296
// compile-flags: -Zmir-emit-validate=0
// error-pattern: the evaluated program panicked
#[derive(Debug)]
struct A;
fn main() {
// can't use assert_eq, b/c that will try to print the pointer addresses with full MIR enabled
assert!(&A as *const A as *const () == &() as *const _)
}

View File

@ -1,11 +0,0 @@
// FIXME: Probably failing due to https://github.com/solson/miri/issues/296
// compile-flags: -Zmir-emit-validate=0
// error-pattern: the evaluated program panicked
#[derive(Debug)]
struct A;
fn main() {
// can't use assert_eq, b/c that will try to print the pointer addresses with full MIR enabled
assert!(&A as *const A == &A as *const A);
}

View File

@ -1,5 +1,3 @@
// FIXME: disable validation until we figure out how to handle <https://github.com/solson/miri/issues/296>.
// compile-flags: -Zmir-emit-validate=0
use std::collections::{self, HashMap};
use std::hash::BuildHasherDefault;

View File

@ -1,4 +1,4 @@
// mir validation can't cope with `mem::uninitialized::<SomeEnum>()`
// mir validation can't cope with `mem::uninitialized()`, so this test fails with validation & full-MIR.
// compile-flags: -Zmir-emit-validate=0
#[derive(PartialEq, Eq, PartialOrd, Ord)]

View File

@ -1,4 +1,4 @@
// Make sure validation can handle many overlapping shared borrows for difference parts of a data structure
// Make sure validation can handle many overlapping shared borrows for different parts of a data structure
#![allow(unused_variables)]
use std::cell::RefCell;
@ -24,7 +24,7 @@ fn test1() {
fn test2(r: &mut RefCell<i32>) {
let x = &*r; // releasing write lock, first suspension recorded
let mut x_ref = x.borrow_mut();
let x_inner : &mut i32 = &mut *x_ref;
let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock
let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension
let y = &*r; // second suspension for the outer write lock
let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock

tests/run-pass/zst2.rs (new file)
View File

@ -0,0 +1,12 @@
#![allow(dead_code)]
#[derive(Debug)]
struct A;
fn main() {
// can't use assert_eq, b/c that will try to print the pointer addresses with full MIR enabled
// FIXME: Test disabled for now, see <https://github.com/solson/miri/issues/131>.
//assert!(&A as *const A as *const () == &() as *const _);
//assert!(&A as *const A == &A as *const A);
}