Retag is the only operation that generates new tags

Ralf Jung 2018-11-07 14:56:25 +01:00
parent 3554d1acdc
commit 09919c2b59
11 changed files with 161 additions and 102 deletions
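
Under the model this commit moves to, fresh tags come only from MIR `Retag` statements; casting a reference to a raw pointer is an `EscapeToRaw` operation that makes the memory raw-accessible without minting a new tag. A minimal sketch of where the two operations arise in surface Rust (the exact MIR lowering is an assumption here, not part of the diff):

fn main() {
    let mut x = 42;
    let r = &mut x;          // MIR `Retag`: fresh Uniq tag for `r`
    let s = &*r;             // MIR `Retag`: fresh Shr tag for `s`
    let p = s as *const i32; // `EscapeToRaw`: no fresh tag, the memory
                             // just becomes accessible via raw pointers
    let _v = unsafe { *p };
}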

.gitignore
View File

@@ -2,6 +2,5 @@ target
/doc
tex/*/out
*.dot
*.mir
*.rs.bk
Cargo.lock

View File

@@ -555,7 +555,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
"pthread_attr_getstack" => {
// second argument is where we are supposed to write the stack size
let ptr = self.ref_to_mplace(self.read_immediate(args[1])?)?;
let ptr = self.deref_operand(args[1])?;
let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
self.write_scalar(stackaddr, ptr.into())?;
// return 0

View File

@@ -59,7 +59,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"atomic_load_relaxed" |
"atomic_load_acq" |
"volatile_load" => {
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let ptr = self.deref_operand(args[0])?;
let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, dest)?;
}
@@ -68,7 +68,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"atomic_store_relaxed" |
"atomic_store_rel" |
"volatile_store" => {
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let ptr = self.deref_operand(args[0])?;
let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, ptr.into())?;
}
@@ -78,7 +78,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let ptr = self.deref_operand(args[0])?;
let new = self.read_scalar(args[1])?;
let old = self.read_scalar(ptr.into())?;
self.write_scalar(old, dest)?; // old value is returned
@@ -86,10 +86,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let expect_old = self.read_immediate(args[1])?; // read as value for the sake of `binary_op_imm()`
let ptr = self.deref_operand(args[0])?;
let expect_old = self.read_immediate(args[1])?; // read as immediate for the sake of `binary_op_imm()`
let new = self.read_scalar(args[2])?;
let old = self.read_immediate(ptr.into())?; // read as value for the sake of `binary_op_imm()`
let old = self.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op_imm()`
// binary_op_imm will bail if either of them is not a scalar
let (eq, _) = self.binary_op_imm(mir::BinOp::Eq, old, expect_old)?;
let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
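
The shim above implements the compare-exchange contract: read the old value, compare it against the expected one with `binary_op_imm`, and return the (old, success) pair that `Immediate::ScalarPair` builds. On the user side this corresponds to `AtomicI32::compare_exchange`; a runnable illustration of the contract (plain Rust, not Miri internals):

use std::sync::atomic::{AtomicI32, Ordering};

fn main() {
    let a = AtomicI32::new(5);
    // Old value 5 matches the expected 5: 10 is stored and Ok(5) is
    // returned -- the (old, eq) pair the shim materializes.
    assert_eq!(a.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst), Ok(5));
    // Old value is now 10, not 6: nothing is stored, Err(10) is returned.
    assert_eq!(a.compare_exchange(6, 12, Ordering::SeqCst, Ordering::SeqCst), Err(10));
    assert_eq!(a.load(Ordering::SeqCst), 10);
}
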
@@ -125,7 +125,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let ptr = self.deref_operand(args[0])?;
if !ptr.layout.ty.is_integral() {
return err!(Unimplemented(format!("Atomic arithmetic operations only work on integer types")));
}
@@ -167,7 +167,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
"discriminant_value" => {
let place = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let place = self.deref_operand(args[0])?;
let discr_val = self.read_discriminant(place.into())?.0;
self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
}
@@ -279,7 +279,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
"move_val_init" => {
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let ptr = self.deref_operand(args[0])?;
self.copy_op(args[1], ptr.into())?;
}
@@ -347,7 +347,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
"size_of_val" => {
let mplace = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let mplace = self.deref_operand(args[0])?;
let (size, _) = self.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = self.pointer_size();
@@ -359,7 +359,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"min_align_of_val" |
"align_of_val" => {
let mplace = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let mplace = self.deref_operand(args[0])?;
let (_, align) = self.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = self.pointer_size();
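
These two shims need the dynamic size and alignment of the pointee, which is why they dereference the operand and query the resulting place instead of the operand's static layout. What they have to compute, as a runnable example:

fn main() {
    let s: &[u16] = &[1, 2, 3];
    // For a slice, the dynamic size comes from the fat pointer's
    // length metadata; the alignment is that of the element type.
    assert_eq!(std::mem::size_of_val(s), 6);
    assert_eq!(std::mem::align_of_val(s), 2);
}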

View File

@@ -296,7 +296,6 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
type AllocExtra = stacked_borrows::Stacks;
type PointerTag = Borrow;
const ENABLE_PTR_TRACKING_HOOKS: bool = true;
type MemoryMap = MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Borrow, Self::AllocExtra>)>;
@@ -446,26 +445,6 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
Cow::Owned(alloc)
}
#[inline(always)]
fn tag_reference(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
place: MPlaceTy<'tcx, Borrow>,
mutability: Option<hir::Mutability>,
) -> EvalResult<'tcx, Scalar<Borrow>> {
let (size, _) = ecx.size_and_align_of_mplace(place)?
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size_and_align());
if !ecx.machine.validate || size == Size::ZERO {
// No tracking
Ok(place.ptr)
} else {
let ptr = place.ptr.to_ptr()?;
let tag = ecx.tag_reference(place, size, mutability.into())?;
Ok(Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)))
}
}
#[inline(always)]
fn tag_dereference(
ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
place: MPlaceTy<'tcx, Borrow>,
@@ -478,7 +457,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
// No tracking
Ok(place.ptr)
} else {
let ptr = place.ptr.to_ptr()?;
let ptr = place.ptr.to_ptr()?; // fails if this is an integer rather than a pointer
let tag = ecx.tag_dereference(place, size, mutability.into())?;
Ok(Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)))
}
@@ -499,6 +478,31 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
}
}
#[inline]
fn escape_to_raw(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
ptr: OpTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx> {
// It is tempting to check the type here, but drop glue does EscapeToRaw
// on a raw pointer.
// This is deliberately NOT `deref_operand` as we do not want `tag_dereference`
// to be called! That would kill the original tag if we got a raw ptr.
let place = ecx.ref_to_mplace(ecx.read_immediate(ptr)?)?;
let (size, _) = ecx.size_and_align_of_mplace(place)?
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size_and_align());
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag ||
!ecx.machine.validate || size == Size::ZERO
{
// No tracking, or no retagging. The latter is possible because a dependency of ours
// might be called with different flags than we are, so there are `Retag`
// statements but we do not want to execute them.
Ok(())
} else {
ecx.escape_to_raw(place, size)
}
}
#[inline(always)]
fn retag(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
@@ -506,12 +510,14 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
place: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx> {
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag || !Self::enforce_validity(ecx) {
// No tracking, or no retagging. This is possible because a dependency of ours might be
// called with different flags than we are,
// No tracking, or no retagging. The latter is possible because a dependency of ours
// might be called with different flags than we are, so there are `Retag`
// statements but we do not want to execute them.
// Also, honor the whitelist in `enforce_validity` because otherwise we might retag
// uninitialized data.
return Ok(())
Ok(())
} else {
ecx.retag(fn_entry, place)
}
ecx.retag(fn_entry, place)
}
}
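
The `retag` hook above only acts when the top-level type of the place is a reference (see the match on `ty::Ref` in the next file); per the TODOs there, references nested inside structs or enums are not retagged yet, and `fn_entry` is not honored yet. A hypothetical example of the distinction, with behavior as of this commit:

struct Wrapper<'a> { r: &'a mut i32 }

fn via_ref(r: &mut i32) {   // reference-typed argument: `Retag` acts here
    *r += 1;
}

fn via_struct(w: Wrapper) { // struct-typed argument: the match falls through
    *w.r += 1;              // to `return Ok(())`, so `w.r` keeps its old tag
}

fn main() {
    let mut x = 0;
    via_ref(&mut x);
    let mut y = 0;
    via_struct(Wrapper { r: &mut y });
}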

View File

@@ -6,7 +6,7 @@ use rustc::hir;
use crate::{
EvalResult, MiriEvalContext, HelpersEvalContextExt,
MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
Pointer, PlaceTy, MPlaceTy,
Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
};
pub type Timestamp = u64;
@@ -395,13 +395,6 @@ impl<'tcx> Stacks {
pub trait EvalContextExt<'tcx> {
fn tag_reference(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
usage: UsageKind,
) -> EvalResult<'tcx, Borrow>;
fn tag_dereference(
&self,
place: MPlaceTy<'tcx, Borrow>,
@@ -415,47 +408,27 @@ pub trait EvalContextExt<'tcx> {
kind: MemoryKind<MiriMemoryKind>,
) -> Borrow;
/// Retag an individual pointer, returning the retagged version.
fn retag_ptr(
&mut self,
ptr: ImmTy<'tcx, Borrow>,
mutbl: hir::Mutability,
) -> EvalResult<'tcx, Immediate<Borrow>>;
fn retag(
&mut self,
fn_entry: bool,
place: PlaceTy<'tcx, Borrow>
) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
/// Called for place-to-value conversion.
fn tag_reference(
fn escape_to_raw(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
usage: UsageKind,
) -> EvalResult<'tcx, Borrow> {
let ptr = place.ptr.to_ptr()?;
let time = self.machine.stacked_borrows.increment_clock();
let new_bor = match usage {
UsageKind::Write => Borrow::Uniq(time),
UsageKind::Read => Borrow::Shr(Some(time)),
UsageKind::Raw => Borrow::Shr(None),
};
trace!("tag_reference: Creating new reference ({:?}) for {:?} (pointee {}): {:?}",
usage, ptr, place.layout.ty, new_bor);
// Update the stacks. First create the new ref as usual, then maybe freeze stuff.
self.memory().check_bounds(ptr, size, false)?;
let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
alloc.extra.use_and_maybe_re_borrow(ptr, size, usage, Some(new_bor))?;
// Maybe freeze stuff
if let Borrow::Shr(Some(bor_t)) = new_bor {
self.visit_frozen(place, size, |frz_ptr, size| {
debug_assert_eq!(frz_ptr.alloc_id, ptr.alloc_id);
// Be frozen!
alloc.extra.freeze(frz_ptr, size, bor_t)
})?;
}
Ok(new_bor)
}
) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
/// Called for value-to-place conversion.
///
/// Note that this does NOT mean that all this memory will actually get accessed/referenced!
@@ -466,9 +439,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
size: Size,
usage: UsageKind,
) -> EvalResult<'tcx, Borrow> {
let ptr = place.ptr.to_ptr()?;
trace!("tag_dereference: Accessing reference ({:?}) for {:?} (pointee {})",
usage, ptr, place.layout.ty);
usage, place.ptr, place.layout.ty);
let ptr = place.ptr.to_ptr()?;
// In principle we should not have to do anything here. However, with transmutes involved,
// it can happen that the tag of `ptr` does not actually match `usage`, and we
// should adjust for that.
@@ -551,6 +524,50 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
Borrow::Uniq(time)
}
fn retag_ptr(
&mut self,
val: ImmTy<'tcx, Borrow>,
mutbl: hir::Mutability,
) -> EvalResult<'tcx, Immediate<Borrow>> {
// We want a place representing the memory the ptr *points to*, so we dereference it.
let place = self.ref_to_mplace(val)?;
let size = self.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size);
if size == Size::ZERO {
// Nothing to do for ZSTs.
return Ok(*val);
}
// Prepare to re-borrow this place.
let ptr = place.ptr.to_ptr()?;
let time = self.machine.stacked_borrows.increment_clock();
let new_bor = match mutbl {
hir::MutMutable => Borrow::Uniq(time),
hir::MutImmutable => Borrow::Shr(Some(time)),
};
trace!("retag: Creating new reference ({:?}) for {:?} (pointee {}): {:?}",
mutbl, ptr, place.layout.ty, new_bor);
// Update the stacks. First create a new borrow, then maybe freeze stuff.
self.memory().check_bounds(ptr, size, false)?; // `ptr_dereference` wouldn't do any checks if this is a raw ptr
let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
alloc.extra.use_and_maybe_re_borrow(ptr, size, Some(mutbl).into(), Some(new_bor))?;
// Maybe freeze stuff
if let Borrow::Shr(Some(bor_t)) = new_bor {
self.visit_frozen(place, size, |frz_ptr, size| {
debug_assert_eq!(frz_ptr.alloc_id, ptr.alloc_id);
// Be frozen!
alloc.extra.freeze(frz_ptr, size, bor_t)
})?;
}
// Compute the new value and return that
let new_ptr = Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, new_bor));
let new_place = MemPlace { ptr: new_ptr, ..*place };
Ok(new_place.to_ref())
}
fn retag(
&mut self,
_fn_entry: bool,
@@ -558,20 +575,30 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
) -> EvalResult<'tcx> {
// For now, we only retag if the toplevel type is a reference.
// TODO: Recurse into structs and enums, sharing code with validation.
// TODO: Honor `fn_entry`.
let mutbl = match place.layout.ty.sty {
ty::Ref(_, _, mutbl) => mutbl, // go ahead
_ => return Ok(()), // don't do a thing
_ => return Ok(()), // do nothing, for now
};
// We want to reborrow the reference stored there. This will call the hooks
// above. First deref, which will call `tag_dereference`.
// (This is somewhat redundant because validation already did the same thing,
// but what can you do.)
// Retag the pointer and write it back.
let val = self.read_immediate(self.place_to_op(place)?)?;
let dest = self.ref_to_mplace(val)?;
// Now put a new ref into the old place, which will call `tag_reference`.
// FIXME: Honor `fn_entry`!
let val = self.create_ref(dest, Some(mutbl))?;
let val = self.retag_ptr(val, mutbl)?;
self.write_immediate(val, place)?;
Ok(())
}
fn escape_to_raw(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
) -> EvalResult<'tcx> {
trace!("self: {:?} is now accessible by raw pointers", *place);
// Re-borrow to raw. This is a NOP for shared borrows, but we do not know the borrow
// type here and that's also okay.
let ptr = place.ptr.to_ptr()?;
self.memory().check_bounds(ptr, size, false)?; // `ptr_dereference` wouldn't do any checks if this is a raw ptr
let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
alloc.extra.use_and_maybe_re_borrow(ptr, size, UsageKind::Raw, Some(Borrow::default()))?;
Ok(())
}
}
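
`escape_to_raw` reborrows the affected range with `UsageKind::Raw` and the default (untagged) borrow; as the comment notes, this is a NOP for shared borrows, so the function does not need to know whether the source was a shared or a mutable reference. Both directions in a runnable example (behavior as the run-pass tests below exercise it):

fn main() {
    let x = 1;
    let p = &x as *const i32;   // shared ref to raw: a NOP reborrow, a
                                // shared item is already on the stack
    assert_eq!(unsafe { *p }, 1);

    let mut y = 2;
    let q = &mut y as *mut i32; // mutable ref to raw: pushes the untagged
                                // raw borrow, making raw accesses legal
    unsafe { *q = 3; }
    assert_eq!(y, 3);
}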

View File

@@ -1,4 +1,4 @@
#![allow(unused_variables)]
// error-pattern: mutable reference with frozen tag
mod safe {
use std::slice::from_raw_parts_mut;
@@ -12,8 +12,10 @@ mod safe {
fn main() {
let v = vec![0,1,2];
let v1 = safe::as_mut_slice(&v);
let _v1 = safe::as_mut_slice(&v);
/*
let v2 = safe::as_mut_slice(&v);
v1[1] = 5; //~ ERROR does not exist on the stack
v1[1] = 5;
v1[1] = 6;
*/
}

View File

@@ -11,7 +11,6 @@ mod safe {
assert!(mid <= len);
(from_raw_parts_mut(ptr, len - mid), // BUG: should be "mid" instead of "len - mid"
//~^ ERROR does not exist on the stack
from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
}
}
@@ -20,6 +19,7 @@ mod safe {
fn main() {
let mut array = [1,2,3,4];
let (a, b) = safe::split_at_mut(&mut array, 0);
//~^ ERROR does not exist on the stack
a[1] = 5;
b[1] = 6;
}

View File

@@ -1,3 +1,6 @@
// FIXME still considering whether we are okay with this not being an error
// ignore-test
static X: usize = 5;
#[allow(mutable_transmutes)]

View File

@@ -0,0 +1,12 @@
// Make sure we cannot use raw ptrs that got transmuted from mutable references
// (i.e., no EscapeToRaw happened).
// We could, in principle, do EscapeToRaw lazily to allow this code, but that
// would not alleviate the need for EscapeToRaw (see `ref_raw_int_raw` in
// `run-pass/stacked-borrows.rs`), and would thus only increase overall complexity.
use std::mem;
fn main() {
let mut x: i32 = 42;
let raw: *mut i32 = unsafe { mem::transmute(&mut x) };
unsafe { *raw = 13; } //~ ERROR does not exist on the stack
}

View File

@@ -1,10 +1,8 @@
use std::mem;
// Make sure we cannot use raw ptrs to access a local that
// has never been escaped to the raw world.
// we took the direct address of.
fn main() {
let mut x = 42;
let ptr = &mut x;
let raw: *mut i32 = unsafe { mem::transmute(ptr) };
let raw = &mut x as *mut i32 as usize as *mut i32;
let _ptr = &mut x;
unsafe { *raw = 13; } //~ ERROR does not exist on the stack
}

View File

@@ -3,15 +3,17 @@ fn main() {
deref_partially_dangling_raw();
read_does_not_invalidate1();
read_does_not_invalidate2();
ref_raw_int_raw();
}
// Deref a raw ptr to access a field of a large struct, where the field
// is allocated but not the entire struct is.
// For now, we want to allow this.
fn deref_partially_dangling_raw() {
let x = (1, 1);
let x = (1, 13);
let xptr = &x as *const _ as *const (i32, i32, i32);
let _val = unsafe { (*xptr).1 };
let val = unsafe { (*xptr).1 };
assert_eq!(val, 13);
}
// Make sure that reading from an `&mut` does, like reborrowing to `&`,
@@ -23,7 +25,7 @@ fn read_does_not_invalidate1() {
let _val = x.1; // we just read, this does NOT invalidate the reborrows.
ret
}
foo(&mut (1, 2));
assert_eq!(*foo(&mut (1, 2)), 2);
}
// Same as above, but this time we first create a raw, then read from `&mut`
// and then freeze from the raw.
@@ -34,5 +36,15 @@ fn read_does_not_invalidate2() {
let ret = unsafe { &(*xraw).1 };
ret
}
foo(&mut (1, 2));
assert_eq!(*foo(&mut (1, 2)), 2);
}
// Just to make sure that casting a ref to raw, to int and back to raw
// and only then using it works. This rules out ideas like "do escape-to-raw lazily":
// After casting to int and back, we lost the tag that could have let us do that.
fn ref_raw_int_raw() {
let mut x = 3;
let xref = &mut x;
let xraw = xref as *mut i32 as usize as *mut i32;
assert_eq!(unsafe { *xraw }, 3);
}