Merge pull request #524 from RalfJung/escape-to-raw

Stacked Borrows beautification, update for EscapeToRaw

commit d78b62e3e4

.gitignore (vendored):
@@ -2,6 +2,5 @@ target
-/doc
 tex/*/out
 *.dot
 *.mir
 *.rs.bk
 Cargo.lock
README.md:
@@ -45,14 +45,19 @@ in this directory.
 ## Running Miri
 
 ```sh
-cargo +nightly run tests/run-pass/vecs.rs # Or whatever test you like.
+cargo +nightly run -- -Zmiri-disable-validation tests/run-pass/vecs.rs # Or whatever test you like.
 ```
 
+We have to disable validation because that can lead to errors when libstd is not
+compiled the right way.
+
 ## Running Miri with full libstd
 
-Per default libstd does not contain the MIR of non-polymorphic functions. When
-Miri hits a call to such a function, execution terminates. To fix this, it is
-possible to compile libstd with full MIR:
+Per default libstd does not contain the MIR of non-polymorphic functions, and
+also does not contain some extra MIR statements that miri needs for validation.
+When Miri hits a call to such a function, execution terminates, and even when
+the MIR is present, validation can fail. To fix this, it is possible to compile
+libstd with full MIR:
 
 ```sh
 rustup component add --toolchain nightly rust-src
@@ -1 +1 @@
-nightly-2018-11-15
+nightly-2018-11-16
@@ -555,7 +555,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
         }
         "pthread_attr_getstack" => {
             // second argument is where we are supposed to write the stack size
-            let ptr = self.ref_to_mplace(self.read_immediate(args[1])?)?;
+            let ptr = self.deref_operand(args[1])?;
             let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
             self.write_scalar(stackaddr, ptr.into())?;
             // return 0
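Throughout this commit, the pattern `self.ref_to_mplace(self.read_immediate(op)?)?` collapses into a single `self.deref_operand(op)?` call. Below is a toy, runnable model of that composition; all types are simplified stand-ins of my own, not the real rustc_mir interpreter types.

```rust
// Toy model: `deref_operand` = `read_immediate` followed by `ref_to_mplace`.
type EvalResult<T> = Result<T, String>;

struct Operand { ptr_bits: u64 }   // an operand holding a pointer value
struct Immediate { ptr_bits: u64 } // the loaded pointer value
struct MemPlace { addr: u64 }      // the place that pointer refers to

fn read_immediate(op: &Operand) -> EvalResult<Immediate> {
    Ok(Immediate { ptr_bits: op.ptr_bits })
}

fn ref_to_mplace(val: Immediate) -> EvalResult<MemPlace> {
    Ok(MemPlace { addr: val.ptr_bits })
}

// The helper this commit switches to, as the composition of the two:
fn deref_operand(op: &Operand) -> EvalResult<MemPlace> {
    ref_to_mplace(read_immediate(op)?)
}

fn main() {
    let place = deref_operand(&Operand { ptr_bits: 0x80000 }).unwrap();
    assert_eq!(place.addr, 0x80000);
}
```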
@@ -33,13 +33,13 @@ impl<Tag> ScalarExt for ScalarMaybeUndef<Tag> {
 pub trait EvalContextExt<'tcx> {
     fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>>;
 
-    /// Visit the memory covered by `place` that is frozen -- i.e., NOT
-    /// what is inside an `UnsafeCell`.
-    fn visit_frozen(
+    /// Visit the memory covered by `place`, sensitive to freezing: The 3rd parameter
+    /// will be true if this is frozen, false if this is in an `UnsafeCell`.
+    fn visit_freeze_sensitive(
         &self,
         place: MPlaceTy<'tcx, Borrow>,
         size: Size,
-        action: impl FnMut(Pointer<Borrow>, Size) -> EvalResult<'tcx>,
+        action: impl FnMut(Pointer<Borrow>, Size, bool) -> EvalResult<'tcx>,
     ) -> EvalResult<'tcx>;
 }
@@ -79,13 +79,11 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
         })
     }
 
-    /// Visit the memory covered by `place` that is frozen -- i.e., NOT
-    /// what is inside an `UnsafeCell`.
-    fn visit_frozen(
+    fn visit_freeze_sensitive(
         &self,
         place: MPlaceTy<'tcx, Borrow>,
         size: Size,
-        mut frozen_action: impl FnMut(Pointer<Borrow>, Size) -> EvalResult<'tcx>,
+        mut action: impl FnMut(Pointer<Borrow>, Size, bool) -> EvalResult<'tcx>,
     ) -> EvalResult<'tcx> {
         trace!("visit_frozen(place={:?}, size={:?})", *place, size);
         debug_assert_eq!(size,
@@ -99,18 +97,29 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
         let mut end_ptr = place.ptr;
         // Called when we detected an `UnsafeCell` at the given offset and size.
         // Calls `action` and advances `end_ptr`.
-        let mut unsafe_cell_action = |unsafe_cell_offset, unsafe_cell_size| {
+        let mut unsafe_cell_action = |unsafe_cell_ptr: Scalar<Borrow>, unsafe_cell_size: Size| {
+            if unsafe_cell_size != Size::ZERO {
+                debug_assert_eq!(unsafe_cell_ptr.to_ptr().unwrap().alloc_id,
+                    end_ptr.to_ptr().unwrap().alloc_id);
+                debug_assert_eq!(unsafe_cell_ptr.to_ptr().unwrap().tag,
+                    end_ptr.to_ptr().unwrap().tag);
+            }
             // We assume that we are given the fields in increasing offset order,
             // and nothing else changes.
+            let unsafe_cell_offset = unsafe_cell_ptr.get_ptr_offset(self);
             let end_offset = end_ptr.get_ptr_offset(self);
             assert!(unsafe_cell_offset >= end_offset);
             let frozen_size = unsafe_cell_offset - end_offset;
             // Everything between the end_ptr and this `UnsafeCell` is frozen.
             if frozen_size != Size::ZERO {
-                frozen_action(end_ptr.to_ptr()?, frozen_size)?;
+                action(end_ptr.to_ptr()?, frozen_size, /*frozen*/true)?;
             }
+            // This `UnsafeCell` is NOT frozen.
+            if unsafe_cell_size != Size::ZERO {
+                action(unsafe_cell_ptr.to_ptr()?, unsafe_cell_size, /*frozen*/false)?;
+            }
             // Update end_ptr.
-            end_ptr = end_ptr.ptr_wrapping_offset(frozen_size+unsafe_cell_size, self);
+            end_ptr = unsafe_cell_ptr.ptr_wrapping_offset(unsafe_cell_size, self);
             // Done
             Ok(())
         };
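The closure above is easier to follow with the interpreter bookkeeping stripped away. Below is a minimal, runnable model of the same partitioning (plain integer offsets instead of tagged pointers, a simplification of mine): the callback sees every frozen gap and every `UnsafeCell` range exactly once, in increasing order.

```rust
// Minimal model of `visit_freeze_sensitive`: given the `UnsafeCell` sub-ranges
// of a value in increasing offset order, report every byte range once, tagged
// with `frozen = true` for the gaps and `frozen = false` for the cells.
fn visit_freeze_sensitive(
    size: u64,
    unsafe_cells: &[(u64, u64)], // (offset, size), sorted by offset
    mut action: impl FnMut(u64, u64, bool),
) {
    let mut end = 0; // everything below `end` has been reported already
    // The trailing zero-sized sentinel plays the role of the
    // "0-sized UnsafeCell at the end" in the real code.
    for &(offset, cell_size) in unsafe_cells.iter().chain(&[(size, 0)]) {
        assert!(offset >= end, "cells must come in increasing offset order");
        if offset > end {
            action(end, offset - end, true); // frozen gap before the cell
        }
        if cell_size > 0 {
            action(offset, cell_size, false); // the `UnsafeCell` itself
        }
        end = offset + cell_size;
    }
}

fn main() {
    // A value of 16 bytes with one 4-byte `UnsafeCell` at offset 8.
    visit_freeze_sensitive(16, &[(8, 4)], |off, len, frozen| {
        println!("{off:>2}..{:>2} frozen={frozen}", off + len);
    });
    // Prints: 0..8 frozen=true, 8..12 frozen=false, 12..16 frozen=true
}
```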
@@ -126,7 +135,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
                     .unwrap_or_else(|| place.layout.size_and_align());
                 // Now handle this `UnsafeCell`, unless it is empty.
                 if unsafe_cell_size != Size::ZERO {
-                    unsafe_cell_action(place.ptr.get_ptr_offset(self), unsafe_cell_size)
+                    unsafe_cell_action(place.ptr, unsafe_cell_size)
                 } else {
                     Ok(())
                 }
@@ -136,7 +145,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
         }
         // The part between the end_ptr and the end of the place is also frozen.
         // So pretend there is a 0-sized `UnsafeCell` at the end.
-        unsafe_cell_action(place.ptr.get_ptr_offset(self) + size, Size::ZERO)?;
+        unsafe_cell_action(place.ptr.ptr_wrapping_offset(size, self), Size::ZERO)?;
         // Done!
         return Ok(());
@@ -59,7 +59,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             "atomic_load_relaxed" |
             "atomic_load_acq" |
             "volatile_load" => {
-                let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let ptr = self.deref_operand(args[0])?;
                 let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
                 self.write_scalar(val, dest)?;
             }

@@ -68,7 +68,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             "atomic_store_relaxed" |
             "atomic_store_rel" |
             "volatile_store" => {
-                let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let ptr = self.deref_operand(args[0])?;
                 let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
                 self.write_scalar(val, ptr.into())?;
             }

@@ -78,7 +78,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             }
 
             _ if intrinsic_name.starts_with("atomic_xchg") => {
-                let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let ptr = self.deref_operand(args[0])?;
                 let new = self.read_scalar(args[1])?;
                 let old = self.read_scalar(ptr.into())?;
                 self.write_scalar(old, dest)?; // old value is returned

@@ -86,10 +86,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             }
 
             _ if intrinsic_name.starts_with("atomic_cxchg") => {
-                let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
-                let expect_old = self.read_immediate(args[1])?; // read as value for the sake of `binary_op_imm()`
+                let ptr = self.deref_operand(args[0])?;
+                let expect_old = self.read_immediate(args[1])?; // read as immediate for the sake of `binary_op_imm()`
                 let new = self.read_scalar(args[2])?;
-                let old = self.read_immediate(ptr.into())?; // read as value for the sake of `binary_op_imm()`
+                let old = self.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op_imm()`
                 // binary_op_imm will bail if either of them is not a scalar
                 let (eq, _) = self.binary_op_imm(mir::BinOp::Eq, old, expect_old)?;
                 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());

@@ -125,7 +125,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             "atomic_xsub_rel" |
             "atomic_xsub_acqrel" |
             "atomic_xsub_relaxed" => {
-                let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let ptr = self.deref_operand(args[0])?;
                 if !ptr.layout.ty.is_integral() {
                     return err!(Unimplemented(format!("Atomic arithmetic operations only work on integer types")));
                 }

@@ -167,7 +167,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             }
 
             "discriminant_value" => {
-                let place = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let place = self.deref_operand(args[0])?;
                 let discr_val = self.read_discriminant(place.into())?.0;
                 self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
             }

@@ -279,7 +279,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             }
 
             "move_val_init" => {
-                let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let ptr = self.deref_operand(args[0])?;
                 self.copy_op(args[1], ptr.into())?;
             }

@@ -347,7 +347,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             }
 
             "size_of_val" => {
-                let mplace = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let mplace = self.deref_operand(args[0])?;
                 let (size, _) = self.size_and_align_of_mplace(mplace)?
                     .expect("size_of_val called on extern type");
                 let ptr_size = self.pointer_size();

@@ -359,7 +359,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
 
             "min_align_of_val" |
             "align_of_val" => {
-                let mplace = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+                let mplace = self.deref_operand(args[0])?;
                 let (_, align) = self.size_and_align_of_mplace(mplace)?
                     .expect("size_of_val called on extern type");
                 let ptr_size = self.pointer_size();
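For orientation, here are the semantics the `atomic_cxchg` shim computes, modeled in plain Rust. The conditional store is not visible in the truncated hunk above, so this assumes the standard compare-exchange behavior (write `new` only on a match); the returned pair mirrors `Immediate::ScalarPair(old, eq)`.

```rust
// Compare-exchange, sketched under the stated assumption.
fn cxchg(loc: &mut i32, expect_old: i32, new: i32) -> (i32, bool) {
    let old = *loc;             // read the current value
    let eq = old == expect_old; // binary_op_imm(Eq, old, expect_old)
    if eq { *loc = new; }       // store only on success
    (old, eq)                   // the pair written to `dest`
}

fn main() {
    let mut x = 5;
    assert_eq!(cxchg(&mut x, 5, 7), (5, true));
    assert_eq!(x, 7);
    assert_eq!(cxchg(&mut x, 5, 9), (7, false));
    assert_eq!(x, 7);
}
```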
src/lib.rs:
@@ -296,7 +296,6 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
 
     type AllocExtra = stacked_borrows::Stacks;
     type PointerTag = Borrow;
-    const ENABLE_PTR_TRACKING_HOOKS: bool = true;
 
     type MemoryMap = MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Borrow, Self::AllocExtra>)>;

@@ -309,16 +308,18 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
 
         // Some functions are whitelisted until we figure out how to fix them.
         // We walk up the stack a few frames to also cover their callees.
-        const WHITELIST: &[&str] = &[
+        const WHITELIST: &[(&str, &str)] = &[
             // Uses mem::uninitialized
-            "std::ptr::read",
-            "std::sys::windows::mutex::Mutex::",
+            ("std::ptr::read", ""),
+            ("std::sys::windows::mutex::Mutex::", ""),
+            // Should directly take a raw reference
+            ("<std::cell::UnsafeCell<T>>", "::get"),
         ];
         for frame in ecx.stack().iter()
             .rev().take(3)
         {
             let name = frame.instance.to_string();
-            if WHITELIST.iter().any(|white| name.starts_with(white)) {
+            if WHITELIST.iter().any(|(prefix, suffix)| name.starts_with(prefix) && name.ends_with(suffix)) {
                 return false;
             }
         }
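The whitelist entries become `(prefix, suffix)` pairs; since every string ends with the empty string, an empty suffix preserves the old prefix-only matching. A runnable illustration of the predicate, with sample names of my own:

```rust
fn main() {
    const WHITELIST: &[(&str, &str)] = &[
        ("std::ptr::read", ""),
        ("<std::cell::UnsafeCell<T>>", "::get"),
    ];
    // An entry matches if the function path starts with the prefix
    // and ends with the suffix; "" as suffix matches anything.
    let check = |name: &str| {
        WHITELIST.iter().any(|(prefix, suffix)| {
            name.starts_with(prefix) && name.ends_with(suffix)
        })
    };
    assert!(check("std::ptr::read"));                   // prefix match, empty suffix
    assert!(check("<std::cell::UnsafeCell<T>>::get"));  // prefix and suffix match
    assert!(!check("<std::cell::UnsafeCell<T>>::new")); // suffix rules this out
}
```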
@@ -446,26 +447,6 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
         Cow::Owned(alloc)
     }
 
-    #[inline(always)]
-    fn tag_reference(
-        ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
-        place: MPlaceTy<'tcx, Borrow>,
-        mutability: Option<hir::Mutability>,
-    ) -> EvalResult<'tcx, Scalar<Borrow>> {
-        let (size, _) = ecx.size_and_align_of_mplace(place)?
-            // for extern types, just cover what we can
-            .unwrap_or_else(|| place.layout.size_and_align());
-        if !ecx.machine.validate || size == Size::ZERO {
-            // No tracking
-            Ok(place.ptr)
-        } else {
-            let ptr = place.ptr.to_ptr()?;
-            let tag = ecx.tag_reference(place, size, mutability.into())?;
-            Ok(Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)))
-        }
-    }
-
     #[inline(always)]
     fn tag_dereference(
         ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
         place: MPlaceTy<'tcx, Borrow>,

@@ -474,11 +455,13 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
         let (size, _) = ecx.size_and_align_of_mplace(place)?
             // for extern types, just cover what we can
             .unwrap_or_else(|| place.layout.size_and_align());
-        if !ecx.machine.validate || size == Size::ZERO {
+        if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag ||
+            !Self::enforce_validity(ecx) || size == Size::ZERO
+        {
             // No tracking
             Ok(place.ptr)
         } else {
-            let ptr = place.ptr.to_ptr()?;
+            let ptr = place.ptr.to_ptr()?; // assert this is not a scalar
             let tag = ecx.tag_dereference(place, size, mutability.into())?;
             Ok(Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)))
         }

@@ -499,6 +482,31 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
         }
     }
 
+    #[inline]
+    fn escape_to_raw(
+        ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
+        ptr: OpTy<'tcx, Self::PointerTag>,
+    ) -> EvalResult<'tcx> {
+        // It is tempting to check the type here, but drop glue does EscapeToRaw
+        // on a raw pointer.
+        // This is deliberately NOT `deref_operand` as we do not want `tag_dereference`
+        // to be called! That would kill the original tag if we got a raw ptr.
+        let place = ecx.ref_to_mplace(ecx.read_immediate(ptr)?)?;
+        let (size, _) = ecx.size_and_align_of_mplace(place)?
+            // for extern types, just cover what we can
+            .unwrap_or_else(|| place.layout.size_and_align());
+        if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag ||
+            !ecx.machine.validate || size == Size::ZERO
+        {
+            // No tracking, or no retagging. The latter is possible because a dependency of ours
+            // might be called with different flags than we are, so there are `Retag`
+            // statements but we do not want to execute them.
+            Ok(())
+        } else {
+            ecx.escape_to_raw(place, size)
+        }
+    }
+
     #[inline(always)]
     fn retag(
         ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,

@@ -506,12 +514,14 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
         place: PlaceTy<'tcx, Borrow>,
     ) -> EvalResult<'tcx> {
         if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag || !Self::enforce_validity(ecx) {
-            // No tracking, or no retagging. This is possible because a dependency of ours might be
-            // called with different flags than we are,
-            return Ok(())
+            // No tracking, or no retagging. The latter is possible because a dependency of ours
+            // might be called with different flags than we are, so there are `Retag`
+            // statements but we do not want to execute them.
+            // Also, honor the whitelist in `enforce_validity` because otherwise we might retag
+            // uninitialized data.
+            Ok(())
+        } else {
+            ecx.retag(fn_entry, place)
         }
-        ecx.retag(fn_entry, place)
     }
 }
@@ -1,12 +1,12 @@
 use std::cell::RefCell;
 
 use rustc::ty::{self, layout::Size};
-use rustc::hir;
+use rustc::hir::{Mutability, MutMutable, MutImmutable};
 
 use crate::{
-    EvalResult, MiriEvalContext, HelpersEvalContextExt,
+    EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt,
     MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
-    Pointer, PlaceTy, MPlaceTy,
+    Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
 };
 
 pub type Timestamp = u64;

@@ -27,7 +27,7 @@ pub enum Borrow {
 
 impl Borrow {
     #[inline(always)]
-    pub fn is_shr(self) -> bool {
+    pub fn is_shared(self) -> bool {
         match self {
             Borrow::Shr(_) => true,
             _ => false,

@@ -35,7 +35,7 @@ impl Borrow {
     }
 
     #[inline(always)]
-    pub fn is_uniq(self) -> bool {
+    pub fn is_unique(self) -> bool {
         match self {
             Borrow::Uniq(_) => true,
             _ => false,
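For reference, a standalone rendering of the renamed predicates, using the `Borrow` shape from this file (the `main` and the derive list are illustrative):

```rust
type Timestamp = u64;

#[derive(Copy, Clone, Debug)]
enum Borrow {
    Uniq(Timestamp),
    Shr(Option<Timestamp>),
}

impl Borrow {
    fn is_shared(self) -> bool {
        match self { Borrow::Shr(_) => true, _ => false }
    }
    fn is_unique(self) -> bool {
        match self { Borrow::Uniq(_) => true, _ => false }
    }
}

fn main() {
    assert!(Borrow::Shr(None).is_shared()); // raw pointers carry an untimed Shr tag
    assert!(Borrow::Uniq(0).is_unique());
}
```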
@@ -96,27 +96,17 @@ impl Stack {
     }
 }
 
-/// What kind of usage of the pointer are we talking about?
+/// What kind of reference is being used?
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
-pub enum UsageKind {
-    /// Write, or create &mut
-    Write,
-    /// Read, or create &
-    Read,
-    /// Create * (raw ptr)
+pub enum RefKind {
+    /// &mut
+    Unique,
+    /// & without interior mutability
+    Frozen,
+    /// * (raw pointer) or & to `UnsafeCell`
     Raw,
 }
 
-impl From<Option<hir::Mutability>> for UsageKind {
-    fn from(mutbl: Option<hir::Mutability>) -> Self {
-        match mutbl {
-            None => UsageKind::Raw,
-            Some(hir::MutMutable) => UsageKind::Write,
-            Some(hir::MutImmutable) => UsageKind::Read,
-        }
-    }
-}
-
 /// Extra global machine state
 #[derive(Clone, Debug)]
 pub struct State {
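`RefKind` replaces `UsageKind`, and the `From<Option<hir::Mutability>>` impl disappears because the mapping now also depends on whether the pointee contains an `UnsafeCell`. Here is a hedged sketch of the mapping the doc comments describe; `Mutability` is simplified and `has_unsafe_cell` stands in for the freeze-sensitive layout visit, so neither is code from this commit.

```rust
#[derive(Copy, Clone)]
enum Mutability { Mutable, Immutable }

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum RefKind { Unique, Frozen, Raw }

// Assumed mapping, inferred from the doc comments on `RefKind`:
fn classify(mutbl: Option<Mutability>, has_unsafe_cell: bool) -> RefKind {
    match mutbl {
        Some(Mutability::Mutable) => RefKind::Unique, // &mut
        Some(Mutability::Immutable) if !has_unsafe_cell => RefKind::Frozen, // plain &
        _ => RefKind::Raw, // raw pointers, and & pointing into an UnsafeCell
    }
}

fn main() {
    assert_eq!(classify(None, false), RefKind::Raw);
    assert_eq!(classify(Some(Mutability::Immutable), true), RefKind::Raw);
    assert_eq!(classify(Some(Mutability::Mutable), false), RefKind::Unique);
}
```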
@@ -127,6 +117,12 @@ impl State {
     pub fn new() -> State {
         State { clock: 0 }
     }
+
+    fn increment_clock(&mut self) -> Timestamp {
+        let val = self.clock;
+        self.clock = val + 1;
+        val
+    }
 }
 
 /// Extra per-allocation state
@@ -136,50 +132,45 @@ pub struct Stacks {
     stacks: RefCell<RangeMap<Stack>>,
 }
 
-/// Core operations
+/// Core per-location operations: deref, access, create.
+/// We need to make at least the following things true:
+///
+/// U1: After creating a Uniq, it is at the top (+unfrozen).
+/// U2: If the top is Uniq (+unfrozen), accesses must be through that Uniq or pop it.
+/// U3: If an access (deref sufficient?) happens with a Uniq, it requires the Uniq to be in the stack.
+///
+/// F1: After creating a &, the parts outside `UnsafeCell` are frozen.
+/// F2: If a write access happens, it unfreezes.
+/// F3: If an access (well, a deref) happens with an & outside `UnsafeCell`, it requires the location to still be frozen.
 impl<'tcx> Stack {
-    /// Check if `bor` could be activated by unfreezing and popping.
-    /// `is_write` indicates whether this is being used to write (or, equivalently, to
-    /// borrow as &mut).
-    /// Returns `Err` if the answer is "no"; otherwise the return value indicates what to
-    /// do: With `Some(n)` you need to unfreeze, and then additionally pop `n` items.
-    fn reactivatable(&self, bor: Borrow, is_write: bool) -> Result<Option<usize>, String> {
-        // Check if we can match the frozen "item". Not possible on writes!
-        if !is_write {
-            // For now, we do NOT check the timestamp. That might be surprising, but
-            // we cannot even notice when a location should be frozen but is not!
-            // Those checks are both done in `tag_dereference`, where we have type information.
-            // Either way, it is crucial that the frozen "item" matches raw pointers:
-            // Reading through a raw should not unfreeze.
-            match (self.frozen_since, bor) {
-                (Some(_), Borrow::Shr(_)) => {
-                    return Ok(None)
-                }
-                _ => {},
-            }
-        }
-        // See if we can find this borrow.
-        for (idx, &itm) in self.borrows.iter().rev().enumerate() {
-            // Check borrow and stack item for compatibility.
+    /// Deref `bor`: Check if the location is frozen and the tag in the stack.
+    /// This does *not* constitute an access! "Deref" refers to the `*` operator
+    /// in Rust, and includes cases like `&*x` or `(*x).foo` where no or only part
+    /// of the memory actually gets accessed. Also we cannot know if we are
+    /// going to read or write.
+    /// Returns the index of the item we matched, `None` if it was the frozen one.
+    /// `kind` indicates which kind of reference is being dereferenced.
+    fn deref(&self, bor: Borrow, kind: RefKind) -> Result<Option<usize>, String> {
+        // Checks related to freezing
+        match bor {
+            Borrow::Shr(Some(bor_t)) if kind == RefKind::Frozen => {
+                // We need the location to be frozen. This ensures F3.
+                let frozen = self.frozen_since.map_or(false, |itm_t| itm_t <= bor_t);
+                return if frozen { Ok(None) } else {
+                    Err(format!("Location is not frozen long enough"))
+                }
+            }
+            Borrow::Shr(_) if self.frozen_since.is_some() => {
+                return Ok(None) // Shared deref to frozen location, looking good
+            }
+            _ => {} // Not sufficient, go on looking.
+        }
+        // If we got here, we have to look for our item in the stack.
+        for (idx, &itm) in self.borrows.iter().enumerate().rev() {
             match (itm, bor) {
-                (BorStackItem::FnBarrier(_), _) => {
-                    return Err(format!("Trying to reactivate a borrow ({:?}) that lives \
-                                        behind a barrier", bor))
-                }
+                (BorStackItem::FnBarrier(_), _) => break,
                 (BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
-                    // Found matching unique item.
-                    if !is_write {
-                        // As a special case, if we are reading and since we *did* find the `Uniq`,
-                        // we try to pop less: We are happy with making a `Shr` or `Frz` active;
-                        // that one will not mind concurrent reads.
-                        match self.reactivatable(Borrow::default(), is_write) {
-                            // If we got something better than `idx`, use that
-                            Ok(None) => return Ok(None),
-                            Ok(Some(shr_idx)) if shr_idx <= idx => return Ok(Some(shr_idx)),
-                            // Otherwise just go on.
-                            _ => {},
-                        }
-                    }
+                    // Found matching unique item. This satisfies U3.
                     return Ok(Some(idx))
                 }
                 (BorStackItem::Shr, Borrow::Shr(_)) => {
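The freeze check inside `deref` runs fine in isolation: a location satisfies a timed shared tag created at time `bor_t` only if it has been continuously frozen since some `itm_t <= bor_t`.

```rust
// The exact predicate from the arm above, extracted and runnable.
fn frozen_long_enough(frozen_since: Option<u64>, bor_t: u64) -> bool {
    frozen_since.map_or(false, |itm_t| itm_t <= bor_t)
}

fn main() {
    assert!(frozen_long_enough(Some(3), 5));  // frozen at t=3, shared ref from t=5
    assert!(!frozen_long_enough(Some(7), 5)); // refrozen after the ref was made
    assert!(!frozen_long_enough(None, 5));    // not frozen at all
}
```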
@@ -190,153 +181,170 @@ impl<'tcx> Stack {
                 _ => {}
             }
         }
-        // Nothing to be found.
-        Err(format!("Borrow-to-reactivate {:?} does not exist on the stack", bor))
+        // If we got here, we did not find our item. We have to error to satisfy U3.
+        Err(format!(
+            "Borrow being dereferenced ({:?}) does not exist on the stack, or is guarded by a barrier",
+            bor
+        ))
     }
 
-    /// Reactivate `bor` for this stack. `is_write` indicates whether this is being
-    /// used to write (or, equivalently, to borrow as &mut).
-    fn reactivate(&mut self, bor: Borrow, is_write: bool) -> EvalResult<'tcx> {
-        let mut pop = match self.reactivatable(bor, is_write) {
-            Ok(None) => return Ok(()),
-            Ok(Some(pop)) => pop,
-            Err(err) => return err!(MachineError(err)),
-        };
-        // Pop what `reactivatable` told us to pop. Always unfreeze.
+    /// Perform an actual memory access using `bor`. We do not know any types here
+    /// or whether things should be frozen, but we *do* know if this is reading
+    /// or writing.
+    fn access(&mut self, bor: Borrow, is_write: bool) -> EvalResult<'tcx> {
+        // Check if we can match the frozen "item".
+        // Not possible on writes!
         if self.is_frozen() {
-            trace!("reactivate: Unfreezing");
+            if !is_write {
+                // When we are frozen, we just accept all reads. No harm in this.
+                // The deref already checked that `Uniq` items are in the stack, and that
+                // the location is frozen if it should be.
+                return Ok(());
+            }
+            trace!("access: Unfreezing");
         }
+        // Unfreeze on writes. This ensures F2.
         self.frozen_since = None;
-        while pop > 0 {
-            let itm = self.borrows.pop().unwrap();
-            trace!("reactivate: Popping {:?}", itm);
-            pop -= 1;
-        }
-        Ok(())
+        // Pop the stack until we have something matching.
+        while let Some(&itm) = self.borrows.last() {
+            match (itm, bor) {
+                (BorStackItem::FnBarrier(_), _) => break,
+                (BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
+                    // Found matching unique item.
+                    return Ok(())
+                }
+                (BorStackItem::Shr, _) if !is_write => {
+                    // When reading, everything can use a shared item!
+                    // We do not want to do this when writing: Writing to an `&mut`
+                    // should reaffirm its exclusivity (i.e., make sure it is
+                    // on top of the stack).
+                    return Ok(())
+                }
+                (BorStackItem::Shr, Borrow::Shr(_)) => {
+                    // Found matching shared item.
+                    return Ok(())
+                }
+                _ => {
+                    // Pop this. This ensures U2.
+                    let itm = self.borrows.pop().unwrap();
+                    trace!("access: Popping {:?}", itm);
+                }
+            }
+        }
+        // If we got here, we did not find our item.
+        err!(MachineError(format!(
+            "Borrow being accessed ({:?}) does not exist on the stack, or is guarded by a barrier",
+            bor
+        )))
     }
 
     /// Initiate `bor`; mostly this means pushing.
     /// This operation cannot fail; it is up to the caller to ensure that the precondition
-    /// is met: We cannot push onto frozen stacks.
-    fn initiate(&mut self, bor: Borrow) {
-        if let Some(_) = self.frozen_since {
-            // "Pushing" a Shr or Frz on top is redundant.
-            match bor {
-                Borrow::Uniq(_) => bug!("Trying to create unique ref to frozen location"),
-                Borrow::Shr(_) => trace!("initiate: New shared ref to frozen location is a NOP"),
-            }
-        } else {
-            // Just push.
-            let itm = match bor {
-                Borrow::Uniq(t) => BorStackItem::Uniq(t),
-                Borrow::Shr(_) if *self.borrows.last().unwrap() == BorStackItem::Shr => {
-                    // Optimization: Don't push a Shr onto a Shr.
-                    trace!("initiate: New shared ref to already shared location is a NOP");
-                    return
-                },
-                Borrow::Shr(_) => BorStackItem::Shr,
-            };
-            trace!("initiate: Pushing {:?}", itm);
-            self.borrows.push(itm)
-        }
-    }
-
-    /// Check if this location is "frozen enough".
-    fn check_frozen(&self, bor_t: Timestamp) -> EvalResult<'tcx> {
-        let frozen = self.frozen_since.map_or(false, |itm_t| itm_t <= bor_t);
-        if !frozen {
-            err!(MachineError(format!("Location is not frozen long enough")))
-        } else {
-            Ok(())
-        }
-    }
-
-    /// Freeze this location, since `bor_t`.
-    fn freeze(&mut self, bor_t: Timestamp) {
-        if let Some(itm_t) = self.frozen_since {
-            assert!(itm_t <= bor_t, "Trying to freeze shorter than it was frozen?");
-        } else {
-            trace!("Freezing");
-            self.frozen_since = Some(bor_t);
-        }
-    }
+    /// is met: We cannot push `Uniq` onto frozen stacks.
+    /// `kind` indicates which kind of reference is being created.
+    fn create(&mut self, bor: Borrow, kind: RefKind) {
+        // First, push the item. We do this even if we will later freeze, because we
+        // will allow mutation of shared data at the expense of unfreezing.
+        if self.frozen_since.is_some() {
+            // A frozen location, this should be impossible!
+            bug!("We should never try pushing to a frozen stack");
+        }
+        // First, push.
+        let itm = match bor {
+            Borrow::Uniq(t) => BorStackItem::Uniq(t),
+            Borrow::Shr(_) => BorStackItem::Shr,
+        };
+        if *self.borrows.last().unwrap() == itm {
+            assert!(bor.is_shared());
+            trace!("create: Sharing a shared location is a NOP");
+        } else {
+            // This ensures U1.
+            trace!("create: Pushing {:?}", itm);
+            self.borrows.push(itm);
+        }
+        // Then, maybe freeze. This is part 2 of ensuring F1.
+        if kind == RefKind::Frozen {
+            let bor_t = match bor {
+                Borrow::Shr(Some(t)) => t,
+                _ => bug!("Creating illegal borrow {:?} for frozen ref", bor),
+            };
+            if let Some(itm_t) = self.frozen_since {
+                assert!(itm_t <= bor_t, "Trying to freeze shorter than it was frozen?");
+            } else {
+                trace!("create: Freezing");
+                self.frozen_since = Some(bor_t);
+            }
+        }
+    }
 }
 
-impl State {
-    fn increment_clock(&mut self) -> Timestamp {
-        let val = self.clock;
-        self.clock = val + 1;
-        val
-    }
-}
-
-/// Higher-level operations
+/// Higher-level per-location operations: deref, access, reborrow.
 impl<'tcx> Stacks {
-    /// The single most important operation: Make sure that using `ptr` is okay,
-    /// and if `new_bor` is present then make that the new current borrow.
-    fn use_and_maybe_re_borrow(
-        &self,
-        ptr: Pointer<Borrow>,
-        size: Size,
-        usage: UsageKind,
-        new_bor: Option<Borrow>,
-    ) -> EvalResult<'tcx> {
-        trace!("use_and_maybe_re_borrow of tag {:?} as {:?}, new {:?}: {:?}, size {}",
-            ptr.tag, usage, new_bor, ptr, size.bytes());
-        let mut stacks = self.stacks.borrow_mut();
-        for stack in stacks.iter_mut(ptr.offset, size) {
-            stack.reactivate(ptr.tag, usage == UsageKind::Write)?;
-            if let Some(new_bor) = new_bor {
-                stack.initiate(new_bor);
-            }
-        }
-        Ok(())
-    }
-
-    /// Freeze the given memory range.
-    fn freeze(
-        &self,
-        ptr: Pointer<Borrow>,
-        size: Size,
-        bor_t: Timestamp
-    ) -> EvalResult<'tcx> {
-        let mut stacks = self.stacks.borrow_mut();
-        for stack in stacks.iter_mut(ptr.offset, size) {
-            stack.freeze(bor_t);
-        }
-        Ok(())
-    }
-
     /// Check that this stack is fine with being dereferenced
-    fn check_deref(
+    fn deref(
         &self,
         ptr: Pointer<Borrow>,
         size: Size,
+        kind: RefKind,
     ) -> EvalResult<'tcx> {
+        trace!("deref for tag {:?} as {:?}: {:?}, size {}",
+            ptr.tag, kind, ptr, size.bytes());
         let mut stacks = self.stacks.borrow_mut();
+        // We need `iter_mut` because `iter` would skip gaps!
         for stack in stacks.iter_mut(ptr.offset, size) {
-            // Conservatively assume we will just read
-            if let Err(err) = stack.reactivatable(ptr.tag, /*is_write*/false) {
-                return err!(MachineError(format!(
-                    "Encountered reference with non-reactivatable tag: {}",
-                    err
-                )))
-            }
+            stack.deref(ptr.tag, kind).map_err(EvalErrorKind::MachineError)?;
         }
         Ok(())
     }
 
-    /// Check that this stack is appropriately frozen
-    fn check_frozen(
+    /// `ptr` got used, reflect that in the stack.
+    fn access(
         &self,
         ptr: Pointer<Borrow>,
         size: Size,
-        bor_t: Timestamp
+        is_write: bool,
    ) -> EvalResult<'tcx> {
+        trace!("{} access of tag {:?}: {:?}, size {}",
+            if is_write { "write" } else { "read" },
+            ptr.tag, ptr, size.bytes());
         let mut stacks = self.stacks.borrow_mut();
         for stack in stacks.iter_mut(ptr.offset, size) {
-            stack.check_frozen(bor_t)?;
+            stack.access(ptr.tag, is_write)?;
         }
         Ok(())
     }
+
+    /// Reborrow the given pointer to the new tag for the given kind of reference.
+    fn reborrow(
+        &self,
+        ptr: Pointer<Borrow>,
+        size: Size,
+        new_bor: Borrow,
+        new_kind: RefKind,
+    ) -> EvalResult<'tcx> {
+        assert_eq!(new_bor.is_unique(), new_kind == RefKind::Unique);
+        trace!("reborrow for tag {:?} to {:?} as {:?}: {:?}, size {}",
+            ptr.tag, new_bor, new_kind, ptr, size.bytes());
+        let mut stacks = self.stacks.borrow_mut();
+        for stack in stacks.iter_mut(ptr.offset, size) {
+            // Access source `ptr`, create new ref.
+            let ptr_idx = stack.deref(ptr.tag, new_kind).map_err(EvalErrorKind::MachineError)?;
+            // If we can deref the new tag already, and if that tag lives higher on
+            // the stack than the one we come from, just use that.
+            // IOW, we check if `new_bor` *already* is "derived from" `ptr.tag`.
+            // This also checks frozenness, if required.
+            let bor_redundant = match (ptr_idx, stack.deref(new_bor, new_kind)) {
+                // If the new borrow works with the frozen item, or else if it lives
+                // above the old one in the stack, our job here is done.
+                (_, Ok(None)) => true,
+                (Some(ptr_idx), Ok(Some(new_idx))) if new_idx >= ptr_idx => true,
+                // Otherwise we need to create a new borrow.
+                _ => false,
+            };
+            if bor_redundant {
+                assert!(new_bor.is_shared(), "A unique reborrow can never be redundant");
+                trace!("reborrow is redundant");
+                continue;
+            }
+            // We need to do some actual work.
+            stack.access(ptr.tag, new_kind == RefKind::Unique)?;
+            stack.create(new_bor, new_kind);
+        }
+        Ok(())
+    }
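To make U1, U2 and F2 concrete, here is a toy, runnable model of `create` and a write `access`; this is not Miri code, just the stack discipline in miniature.

```rust
#[allow(dead_code)]
#[derive(Copy, Clone, PartialEq, Debug)]
enum Item { Uniq(u64), Shr }

#[derive(Default)]
struct Stack { borrows: Vec<Item>, frozen_since: Option<u64> }

impl Stack {
    // Writing unfreezes (F2) and pops until the matching item is on top (U2).
    fn access_write(&mut self, tag: Item) -> Result<(), String> {
        self.frozen_since = None;
        while let Some(&top) = self.borrows.last() {
            if top == tag { return Ok(()); }
            self.borrows.pop(); // pop competing items
        }
        Err(format!("{:?} does not exist on the stack", tag))
    }

    // Creating a unique borrow pushes it on top (U1).
    fn create_uniq(&mut self, t: u64) { self.borrows.push(Item::Uniq(t)); }
}

fn main() {
    let mut stack = Stack::default();
    stack.create_uniq(0); // the local itself
    stack.create_uniq(1); // xref1 = &mut x
    stack.create_uniq(2); // xref2 = &mut *xref1
    // Writing through xref1 pops xref2 (U2)...
    stack.access_write(Item::Uniq(1)).unwrap();
    assert!(stack.frozen_since.is_none());
    // ...so a later use of xref2 fails, like the `illegal_*` tests below.
    assert!(stack.access_write(Item::Uniq(2)).is_err());
}
```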
@@ -350,8 +358,7 @@ impl AllocationExtra<Borrow> for Stacks {
         ptr: Pointer<Borrow>,
         size: Size,
     ) -> EvalResult<'tcx> {
-        // Reads behave exactly like the first half of a reborrow-to-shr
-        alloc.extra.use_and_maybe_re_borrow(ptr, size, UsageKind::Read, None)
+        alloc.extra.access(ptr, size, /*is_write*/false)
     }
 
     #[inline(always)]

@@ -360,8 +367,7 @@ impl AllocationExtra<Borrow> for Stacks {
         ptr: Pointer<Borrow>,
         size: Size,
     ) -> EvalResult<'tcx> {
-        // Writes behave exactly like the first half of a reborrow-to-mut
-        alloc.extra.use_and_maybe_re_borrow(ptr, size, UsageKind::Write, None)
+        alloc.extra.access(ptr, size, /*is_write*/true)
     }
 
     #[inline(always)]

@@ -371,7 +377,7 @@ impl AllocationExtra<Borrow> for Stacks {
         size: Size,
     ) -> EvalResult<'tcx> {
         // This is like mutating
-        alloc.extra.use_and_maybe_re_borrow(ptr, size, UsageKind::Write, None)
+        alloc.extra.access(ptr, size, /*is_write*/true)
+        // FIXME: Error out if there are any barriers?
     }
 }
@@ -395,18 +401,11 @@ impl<'tcx> Stacks {
 
 pub trait EvalContextExt<'tcx> {
-    fn tag_reference(
-        &mut self,
-        place: MPlaceTy<'tcx, Borrow>,
-        size: Size,
-        usage: UsageKind,
-    ) -> EvalResult<'tcx, Borrow>;
-
     fn tag_dereference(
         &self,
         place: MPlaceTy<'tcx, Borrow>,
         size: Size,
-        usage: UsageKind,
+        mutability: Option<Mutability>,
     ) -> EvalResult<'tcx, Borrow>;
 
     fn tag_new_allocation(
@@ -415,113 +414,27 @@ pub trait EvalContextExt<'tcx> {
         kind: MemoryKind<MiriMemoryKind>,
     ) -> Borrow;
 
+    /// Retag an individual pointer, returning the retagged version.
+    fn reborrow(
+        &mut self,
+        ptr: ImmTy<'tcx, Borrow>,
+        mutbl: Mutability,
+    ) -> EvalResult<'tcx, Immediate<Borrow>>;
+
     fn retag(
         &mut self,
         fn_entry: bool,
         place: PlaceTy<'tcx, Borrow>
     ) -> EvalResult<'tcx>;
+
+    fn escape_to_raw(
+        &mut self,
+        place: MPlaceTy<'tcx, Borrow>,
+        size: Size,
+    ) -> EvalResult<'tcx>;
 }
 
 impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
-    /// Called for place-to-value conversion.
-    fn tag_reference(
-        &mut self,
-        place: MPlaceTy<'tcx, Borrow>,
-        size: Size,
-        usage: UsageKind,
-    ) -> EvalResult<'tcx, Borrow> {
-        let ptr = place.ptr.to_ptr()?;
-        let time = self.machine.stacked_borrows.increment_clock();
-        let new_bor = match usage {
-            UsageKind::Write => Borrow::Uniq(time),
-            UsageKind::Read => Borrow::Shr(Some(time)),
-            UsageKind::Raw => Borrow::Shr(None),
-        };
-        trace!("tag_reference: Creating new reference ({:?}) for {:?} (pointee {}): {:?}",
-            usage, ptr, place.layout.ty, new_bor);
-
-        // Update the stacks. First create the new ref as usual, then maybe freeze stuff.
-        self.memory().check_bounds(ptr, size, false)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
-        alloc.extra.use_and_maybe_re_borrow(ptr, size, usage, Some(new_bor))?;
-        // Maybe freeze stuff
-        if let Borrow::Shr(Some(bor_t)) = new_bor {
-            self.visit_frozen(place, size, |frz_ptr, size| {
-                debug_assert_eq!(frz_ptr.alloc_id, ptr.alloc_id);
-                // Be frozen!
-                alloc.extra.freeze(frz_ptr, size, bor_t)
-            })?;
-        }
-
-        Ok(new_bor)
-    }
-
-    /// Called for value-to-place conversion.
-    ///
-    /// Note that this does NOT mean that all this memory will actually get accessed/referenced!
-    /// We could be in the middle of `&(*var).1`.
-    fn tag_dereference(
-        &self,
-        place: MPlaceTy<'tcx, Borrow>,
-        size: Size,
-        usage: UsageKind,
-    ) -> EvalResult<'tcx, Borrow> {
-        let ptr = place.ptr.to_ptr()?;
-        trace!("tag_dereference: Accessing reference ({:?}) for {:?} (pointee {})",
-            usage, ptr, place.layout.ty);
-        // In principle we should not have to do anything here. However, with transmutes involved,
-        // it can happen that the tag of `ptr` does not actually match `usage`, and we
-        // should adjust for that.
-        // Notably, the compiler can introduce such transmutes by optimizing away `&[mut]*`.
-        // That can transmute a raw ptr to a (shared/mut) ref, and a mut ref to a shared one.
-        match (usage, ptr.tag) {
-            (UsageKind::Raw, _) => {
-                // Don't use the tag, this is a raw access! Even if there is a tag,
-                // that means transmute happened and we ignore the tag.
-                // Also don't do any further validation, this is raw after all.
-                return Ok(Borrow::default());
-            }
-            (UsageKind::Write, Borrow::Uniq(_)) |
-            (UsageKind::Read, Borrow::Shr(_)) => {
-                // Expected combinations. Nothing to do.
-            }
-            (UsageKind::Write, Borrow::Shr(None)) => {
-                // Raw transmuted to mut ref. Keep this as raw access.
-                // We cannot reborrow here; there might be a raw in `&(*var).1` where
-                // `var` is an `&mut`. The other field of the struct might be already frozen,
-                // also using `var`, and that would be okay.
-            }
-            (UsageKind::Read, Borrow::Uniq(_)) => {
-                // A mut got transmuted to shr. Can happen even from compiler transformations:
-                // `&*x` gets optimized to `x` even when `x` is a `&mut`.
-            }
-            (UsageKind::Write, Borrow::Shr(Some(_))) => {
-                // This is just invalid: A shr got transmuted to a mut.
-                // If we ever allow this, we have to consider what we do when we turn a
-                // `Raw`-tagged `&mut` into a raw pointer pointing to a frozen location.
-                // We probably do not want to allow that, but we have to allow
-                // turning a `Raw`-tagged `&` into a raw ptr to a frozen location.
-                return err!(MachineError(format!("Encountered mutable reference with frozen tag {:?}", ptr.tag)))
-            }
-        }
-
-        // If we got here, we do some checking, *but* we leave the tag unchanged.
-        self.memory().check_bounds(ptr, size, false)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
-        alloc.extra.check_deref(ptr, size)?;
-        // Maybe check frozen stuff
-        if let Borrow::Shr(Some(bor_t)) = ptr.tag {
-            self.visit_frozen(place, size, |frz_ptr, size| {
-                debug_assert_eq!(frz_ptr.alloc_id, ptr.alloc_id);
-                // Are you frozen?
-                alloc.extra.check_frozen(frz_ptr, size, bor_t)
-            })?;
-        }
-
-        // All is good, and do not change the tag
-        Ok(ptr.tag)
-    }
-
     fn tag_new_allocation(
         &mut self,
         id: AllocId,
@@ -551,6 +464,139 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
         Borrow::Uniq(time)
     }
 
+    /// Called for value-to-place conversion. `mutability` is `None` for raw pointers.
+    ///
+    /// Note that this does NOT mean that all this memory will actually get accessed/referenced!
+    /// We could be in the middle of `&(*var).1`.
+    fn tag_dereference(
+        &self,
+        place: MPlaceTy<'tcx, Borrow>,
+        size: Size,
+        mutability: Option<Mutability>,
+    ) -> EvalResult<'tcx, Borrow> {
+        trace!("tag_dereference: Accessing {} reference for {:?} (pointee {})",
+            if let Some(mutability) = mutability { format!("{:?}", mutability) } else { format!("raw") },
+            place.ptr, place.layout.ty);
+        let ptr = place.ptr.to_ptr()?;
+        // In principle we should not have to do anything here. However, with transmutes involved,
+        // it can happen that the tag of `ptr` does not actually match `mutability`, and we
+        // should adjust for that.
+        // Notably, the compiler can introduce such transmutes by optimizing away `&[mut]*`.
+        // That can transmute a raw ptr to a (shared/mut) ref, and a mut ref to a shared one.
+        match (mutability, ptr.tag) {
+            (None, _) => {
+                // Don't use the tag, this is a raw access! They should happen tagless.
+                // This is needed for `*mut` to make any sense: Writes *do* enforce the
+                // `Uniq` tag to be up top, but we must make sure raw writes do not do that.
+                // This does mean, however, that `&*foo` is *not* a NOP *if* `foo` is a raw ptr.
+                // Also don't do any further validation, this is raw after all.
+                return Ok(Borrow::default());
+            }
+            (Some(MutMutable), Borrow::Uniq(_)) |
+            (Some(MutImmutable), Borrow::Shr(_)) => {
+                // Expected combinations. Nothing to do.
+            }
+            (Some(MutMutable), Borrow::Shr(None)) => {
+                // Raw transmuted to mut ref. This is something real unsafe code does.
+                // We cannot reborrow here because we do not want to mutate state on a deref.
+            }
+            (Some(MutImmutable), Borrow::Uniq(_)) => {
+                // A mut got transmuted to shr. Can happen even from compiler transformations:
+                // `&*x` gets optimized to `x` even when `x` is a `&mut`.
+            }
+            (Some(MutMutable), Borrow::Shr(Some(_))) => {
+                // This is just invalid: A shr got transmuted to a mut.
+                // If we ever allow this, we have to consider what we do when we turn a
+                // `Raw`-tagged `&mut` into a raw pointer pointing to a frozen location.
+                // We probably do not want to allow that, but we have to allow
+                // turning a `Raw`-tagged `&` into a raw ptr to a frozen location.
+                return err!(MachineError(format!("Encountered mutable reference with frozen tag {:?}", ptr.tag)))
+            }
+        }
+
+        // Get the allocation
+        self.memory().check_bounds(ptr, size, false)?;
+        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        // If we got here, we do some checking, *but* we leave the tag unchanged.
+        if let Borrow::Shr(Some(_)) = ptr.tag {
+            assert_eq!(mutability, Some(MutImmutable));
+            // We need a frozen-sensitive check
+            self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
+                let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
+                alloc.extra.deref(cur_ptr, size, kind)
+            })?;
+        } else {
+            // Just treat this as one big chunk
+            let kind = if mutability == Some(MutMutable) { RefKind::Unique } else { RefKind::Raw };
+            alloc.extra.deref(ptr, size, kind)?;
+        }
+
+        // All is good, and do not change the tag
+        Ok(ptr.tag)
+    }
+
+    /// The given place may henceforth be accessed through raw pointers.
+    fn escape_to_raw(
+        &mut self,
+        place: MPlaceTy<'tcx, Borrow>,
+        size: Size,
+    ) -> EvalResult<'tcx> {
+        trace!("escape_to_raw: {:?} is now accessible by raw pointers", *place);
+        // Get the allocation
+        let ptr = place.ptr.to_ptr()?;
+        self.memory().check_bounds(ptr, size, false)?; // `ptr_dereference` wouldn't do any checks if this is a raw ptr
+        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        // Re-borrow to raw. This is a NOP for shared borrows, but we do not know the borrow
+        // type here and that's also okay. Freezing does not matter here.
+        alloc.extra.reborrow(ptr, size, Borrow::default(), RefKind::Raw)
+    }
+
+    fn reborrow(
+        &mut self,
+        val: ImmTy<'tcx, Borrow>,
+        mutbl: Mutability,
+    ) -> EvalResult<'tcx, Immediate<Borrow>> {
+        // We want a place for where the ptr *points to*, so we get one.
+        let place = self.ref_to_mplace(val)?;
+        let size = self.size_and_align_of_mplace(place)?
+            .map(|(size, _)| size)
+            .unwrap_or_else(|| place.layout.size);
+        if size == Size::ZERO {
+            // Nothing to do for ZSTs.
+            return Ok(*val);
+        }
+
+        // Prepare to re-borrow this place.
+        let ptr = place.ptr.to_ptr()?;
+        let time = self.machine.stacked_borrows.increment_clock();
+        let new_bor = match mutbl {
+            MutMutable => Borrow::Uniq(time),
+            MutImmutable => Borrow::Shr(Some(time)),
+        };
+        trace!("reborrow: Creating new {:?} reference for {:?} (pointee {}): {:?}",
+            mutbl, ptr, place.layout.ty, new_bor);
+
+        // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
+        self.memory().check_bounds(ptr, size, false)?;
+        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        // Update the stacks.
+        if mutbl == MutImmutable {
+            // Shared reference. We need a frozen-sensitive reborrow.
+            self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
+                let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
+                alloc.extra.reborrow(cur_ptr, size, new_bor, kind)
+            })?;
+        } else {
+            // Mutable reference. Just treat this as one big chunk.
+            alloc.extra.reborrow(ptr, size, new_bor, RefKind::Unique)?;
+        }
+
+        // Return new ptr
+        let new_ptr = Pointer::new_with_tag(ptr.alloc_id, ptr.offset, new_bor);
+        let new_place = MemPlace { ptr: Scalar::Ptr(new_ptr), ..*place };
+        Ok(new_place.to_ref())
+    }
+
     fn retag(
         &mut self,
         _fn_entry: bool,

@@ -558,19 +604,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
     ) -> EvalResult<'tcx> {
         // For now, we only retag if the toplevel type is a reference.
+        // TODO: Recurse into structs and enums, sharing code with validation.
+        // TODO: Honor `fn_entry`.
         let mutbl = match place.layout.ty.sty {
             ty::Ref(_, _, mutbl) => mutbl, // go ahead
-            _ => return Ok(()), // don't do a thing
+            _ => return Ok(()), // do nothing, for now
         };
-        // We want to reborrow the reference stored there. This will call the hooks
-        // above. First deref, which will call `tag_dereference`.
-        // (This is somewhat redundant because validation already did the same thing,
-        // but what can you do.)
+        // Retag the pointer and write it back.
         let val = self.read_immediate(self.place_to_op(place)?)?;
-        let dest = self.ref_to_mplace(val)?;
-        // Now put a new ref into the old place, which will call `tag_reference`.
-        // FIXME: Honor `fn_entry`!
-        let val = self.create_ref(dest, Some(mutbl))?;
+        let val = self.reborrow(val, mutbl)?;
         self.write_immediate(val, place)?;
         Ok(())
     }
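As a source-level illustration of what `EscapeToRaw` enables, in the style of the run-pass tests below: the example is mine, and the claim that it is accepted follows my reading of the rules above rather than anything the diff states outright.

```rust
fn main() {
    let mut x = 7;
    let xref = &mut x;
    let xraw = xref as *mut i32; // the mutable-ref-to-raw cast is the escape point
    unsafe { *xraw = 8 };        // raw access is fine: the tag escaped to raw
    assert_eq!(*xref, 8);        // and the original reference is still usable
}
```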
@@ -11,5 +11,5 @@ fn main() {
     retarget(&mut target_alias, target);
     // now `target_alias` points to the same thing as `target`
     *target = 13;
-    let _val = *target_alias; //~ ERROR reference with non-reactivatable tag
+    let _val = *target_alias; //~ ERROR does not exist on the stack
 }
@@ -1,4 +1,4 @@
-#![allow(unused_variables)]
+// error-pattern: mutable reference with frozen tag
 
 mod safe {
     use std::slice::from_raw_parts_mut;

@@ -12,8 +12,10 @@ mod safe {
 
 fn main() {
     let v = vec![0,1,2];
-    let v1 = safe::as_mut_slice(&v);
+    let _v1 = safe::as_mut_slice(&v);
+    /*
     let v2 = safe::as_mut_slice(&v);
-    v1[1] = 5; //~ ERROR does not exist on the stack
+    v1[1] = 5;
     v1[1] = 6;
+    */
 }
@@ -11,7 +11,6 @@ mod safe {
         assert!(mid <= len);
 
         (from_raw_parts_mut(ptr, len - mid), // BUG: should be "mid" instead of "len - mid"
-        //~^ ERROR does not exist on the stack
         from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
     }
 }

@@ -20,6 +19,7 @@ mod safe {
 fn main() {
     let mut array = [1,2,3,4];
     let (a, b) = safe::split_at_mut(&mut array, 0);
+    //~^ ERROR does not exist on the stack
     a[1] = 5;
     b[1] = 6;
 }
tests/compile-fail-fullmir/stacked_borrows/illegal_read3.rs (new file):
@@ -0,0 +1,29 @@
+#![feature(untagged_unions)]
+// A callee may not read the destination of our `&mut` without
+// us noticing.
+// This code got carefully checked to not introduce any reborrows
+// that are not explicit in the source. Let's hope the compiler does not break this later!
+
+use std::mem;
+
+fn main() {
+    let mut x: i32 = 15;
+    let xref1 = &mut x;
+    let xref1_sneaky: usize = unsafe { mem::transmute_copy(&xref1) };
+    let xref2 = &mut *xref1; // derived from xref1, so using raw is still okay...
+    callee(xref1_sneaky);
+    let _val = *xref2; // ...but any use of it will invalidate our ref.
+    //~^ ERROR: does not exist on the stack
+}
+
+fn callee(xref1: usize) {
+    // Transmuting through a union to avoid retagging
+    union UsizeToRef {
+        from: usize,
+        to: &'static mut i32,
+    }
+    let xref1 = UsizeToRef { from: xref1 };
+    // Doing the deref and the transmute (through the union) in the same place expression
+    // should avoid retagging.
+    let _val = unsafe { *xref1.to };
+}
@@ -0,0 +1,9 @@
+// Using a raw invalidates derived `&mut` even for reading.
+fn main() {
+    let mut x = 2;
+    let xref1 = &mut x;
+    let xraw = xref1 as *mut _;
+    let xref2 = unsafe { &mut *xraw };
+    let _val = unsafe { *xraw }; // use the raw again, this invalidates xref2 *even* with the special read exception for uniq refs
+    let _illegal = *xref2; //~ ERROR does not exist on the stack
+}
tests/compile-fail-fullmir/stacked_borrows/illegal_read5.rs (new file):
@@ -0,0 +1,16 @@
+// We *can* have aliasing &RefCell<T> and &mut T, but we cannot read through the former.
+// Else we couldn't optimize based on the assumption that `xref` below is truly unique.
+
+use std::cell::RefCell;
+use std::{mem, ptr};
+
+fn main() {
+    let rc = RefCell::new(0);
+    let mut refmut = rc.borrow_mut();
+    let xref: &mut i32 = &mut *refmut;
+    let xshr = &rc; // creating this is okay
+    let _val = *xref; // we can even still use our mutable reference
+    mem::forget(unsafe { ptr::read(xshr) }); // but after reading through the shared ref
+    let _val = *xref; // the mutable one is dead and gone
+    //~^ ERROR does not exist on the stack
+}
@@ -8,5 +8,5 @@ fn main() {
     let target = Box::new(42); // has an implicit raw
     let ref_ = &*target;
     evil(ref_); // invalidates shared ref, activates raw
-    let _x = *ref_; //~ ERROR is not frozen long enough
+    let _x = *ref_; //~ ERROR is not frozen
 }
@@ -4,5 +4,5 @@ fn main() {
     let r#ref = &target; // freeze
     let ptr = r#ref as *const _ as *mut _; // raw ptr, with raw tag
     unsafe { *ptr = 42; }
-    let _val = *r#ref; //~ ERROR is not frozen long enough
+    let _val = *r#ref; //~ ERROR is not frozen
 }
@@ -9,5 +9,5 @@ fn main() {
     let ptr = reference as *const _ as *mut i32; // raw ptr, with raw tag
     let _mut_ref: &mut i32 = unsafe { mem::transmute(ptr) }; // &mut, with raw tag
     // Now we retag, making our ref top-of-stack -- and, in particular, unfreezing.
-    let _val = *reference; //~ ERROR is not frozen long enough
+    let _val = *reference; //~ ERROR is not frozen
 }
@@ -7,7 +7,7 @@ fn main() {
     let xref = unsafe { &mut *xraw }; // derived from raw, so using raw is still okay...
     callee(xraw);
     let _val = *xref; // ...but any use of raw will invalidate our ref.
-    //~^ ERROR: reference with non-reactivatable tag
+    //~^ ERROR: does not exist on the stack
 }
 
 fn callee(xraw: *mut i32) {
@@ -4,6 +4,6 @@ fn main() {
     let xraw = x as *mut _;
     let xref = unsafe { &mut *xraw };
     let xref_in_mem = Box::new(xref);
-    let _val = *x; // invalidate xraw
+    let _val = unsafe { *xraw }; // invalidate xref
     let _val = *xref_in_mem; //~ ERROR does not exist on the stack
 }
@@ -4,6 +4,6 @@ fn main() {
     let xraw = x as *mut _;
     let xref = unsafe { &*xraw };
     let xref_in_mem = Box::new(xref);
-    *x = 42; // invalidate xraw
-    let _val = *xref_in_mem; //~ ERROR does not exist on the stack
+    unsafe { *xraw = 42 }; // unfreeze
+    let _val = *xref_in_mem; //~ ERROR is not frozen
 }
@@ -0,0 +1,29 @@
+fn demo_mut_advanced_unique(our: &mut i32) -> i32 {
+    unknown_code_1(&*our);
+
+    // This "re-asserts" uniqueness of the reference: After writing, we know
+    // our tag is at the top of the stack.
+    *our = 5;
+
+    unknown_code_2();
+
+    // We know this will return 5
+    *our
+}
+
+// Now comes the evil context
+use std::ptr;
+
+static mut LEAK: *mut i32 = ptr::null_mut();
+
+fn unknown_code_1(x: &i32) { unsafe {
+    LEAK = x as *const _ as *mut _;
+} }
+
+fn unknown_code_2() { unsafe {
+    *LEAK = 7; //~ ERROR does not exist on the stack
+} }
+
+fn main() {
+    assert_eq!(demo_mut_advanced_unique(&mut 0), 5);
+}
@@ -5,6 +5,6 @@ fn main() {
     let x = &mut 42;
     let xraw = x as *mut _;
     let xref = unsafe { &mut *xraw };
-    let _val = *x; // invalidate xraw
+    let _val = unsafe { *xraw }; // invalidate xref
     foo(xref); //~ ERROR does not exist on the stack
 }
@@ -3,8 +3,8 @@ fn foo(_: &i32) {}
 
 fn main() {
     let x = &mut 42;
-    let xraw = &*x as *const _;
+    let xraw = &*x as *const _ as *mut _;
     let xref = unsafe { &*xraw };
-    *x = 42; // invalidate xraw
-    foo(xref); //~ ERROR does not exist on the stack
+    unsafe { *xraw = 42 }; // unfreeze
+    foo(xref); //~ ERROR is not frozen
 }
@@ -2,7 +2,7 @@
 fn foo(x: &mut (i32, i32)) -> &mut i32 {
     let xraw = x as *mut (i32, i32);
     let ret = unsafe { &mut (*xraw).1 };
-    let _val = *x; // invalidate xraw and its children
+    let _val = unsafe { *xraw }; // invalidate xref
     ret //~ ERROR does not exist on the stack
 }
@@ -2,8 +2,8 @@
 fn foo(x: &mut (i32, i32)) -> &i32 {
     let xraw = x as *mut (i32, i32);
     let ret = unsafe { &(*xraw).1 };
-    x.1 = 42; // invalidate xraw on the 2nd field
-    ret //~ ERROR does not exist on the stack
+    unsafe { *xraw = (42, 23) }; // unfreeze
+    ret //~ ERROR is not frozen
 }
 
 fn main() {
@@ -1,3 +1,6 @@
+// FIXME still considering whether we are okay with this not being an error
+// ignore-test
+
 static X: usize = 5;
 
 #[allow(mutable_transmutes)]
@ -0,0 +1,12 @@
// Make sure we cannot use raw ptrs that got transmuted from mutable references
// (i.e., no EscapeToRaw happened).
// We could, in principle, do EscapeToRaw lazily to allow this code, but that
// would not alleviate the need for EscapeToRaw (see `ref_raw_int_raw` in
// `run-pass/stacked-borrows.rs`), and would thus increase overall complexity.
use std::mem;

fn main() {
    let mut x: i32 = 42;
    let raw: *mut i32 = unsafe { mem::transmute(&mut x) };
    unsafe { *raw = 13; } //~ ERROR does not exist on the stack
}
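For contrast, a raw pointer created by an explicit `as` cast does go through EscapeToRaw, so the same write is accepted. A sketch (the `ref_raw_int_raw` and `mut_shr_raw` additions later in this diff exercise the same pattern under Miri):

```rust
fn main() {
    let mut x: i32 = 42;
    // The cast performs EscapeToRaw, pushing a Raw entry onto the stack.
    let raw: *mut i32 = &mut x as *mut i32;
    unsafe { *raw = 13; } // accepted: the raw ptr exists on the stack
    assert_eq!(x, 13);
}
```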
@ -1,10 +1,8 @@
use std::mem;

// Make sure we cannot use raw ptrs to access a local that
// has never been escaped to the raw world.
// we took the direct address of.
fn main() {
    let mut x = 42;
    let ptr = &mut x;
    let raw: *mut i32 = unsafe { mem::transmute(ptr) };
    let raw = &mut x as *mut i32 as usize as *mut i32;
    let _ptr = &mut x;
    unsafe { *raw = 13; } //~ ERROR does not exist on the stack
}
@ -99,7 +99,13 @@ fn miri_pass(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir:
    flags.push(format!("--sysroot {}", sysroot.display()));
    flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs
    if opt {
        flags.push("-Zmir-opt-level=3".to_owned());
        // FIXME: We use opt level 1 because MIR inlining defeats the validation
        // whitelist.
        flags.push("-Zmir-opt-level=1".to_owned());
    }
    if !have_fullmir() {
        // Validation relies on the EscapeToRaw statements being emitted
        flags.push("-Zmiri-disable-validation".to_owned());
    }

    let mut config = mk_config("ui");
@ -1,13 +1,33 @@
use std::cell::RefCell;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use std::sync::Arc;

fn rc_refcell() {
    let r = Rc::new(RefCell::new(42));
    let r2 = r.clone();
    *r.borrow_mut() += 10;
    let x = *r.borrow();
    let x = *r2.borrow();
    assert_eq!(x, 52);
}

fn rc_cell() {
    let r = Rc::new(Cell::new(42));
    let r2 = r.clone();
    let x = r.get();
    r2.set(x + x);
    assert_eq!(r.get(), 84);
}

fn rc_refcell2() {
    let r = Rc::new(RefCell::new(42));
    let r2 = r.clone();
    *r.borrow_mut() += 10;
    let x = r2.borrow();
    let r3 = r.clone();
    let y = r3.borrow();
    assert_eq!((*x + *y)/2, 52);
}

fn rc_raw() {
    let r = Rc::new(0);
    let r2 = Rc::into_raw(r.clone());
@ -17,6 +37,14 @@ fn rc_raw() {
    assert!(Rc::try_unwrap(r2).is_ok());
}

fn arc() {
    fn test() -> Arc<i32> {
        let a = Arc::new(42);
        a
    }
    assert_eq!(*test(), 42);
}

// Make sure this Rc doesn't fall apart when touched
fn check_unique_rc<T: ?Sized>(mut r: Rc<T>) {
    let r2 = r.clone();
@ -34,6 +62,9 @@ fn rc_from() {

fn main() {
    rc_refcell();
    rc_refcell2();
    rc_cell();
    rc_raw();
    rc_from();
    arc();
}
@ -1,6 +1,6 @@
use std::cell::RefCell;

fn main() {
fn lots_of_funny_borrows() {
    let c = RefCell::new(42);
    {
        let s1 = c.borrow();
@ -31,3 +31,24 @@ fn main() {
        let _y: i32 = *s2;
    }
}

fn aliasing_mut_and_shr() {
    fn inner(rc: &RefCell<i32>, aliasing: &mut i32) {
        *aliasing += 4;
        let _escape_to_raw = rc as *const _;
        *aliasing += 4;
        let _shr = &*rc;
        *aliasing += 4;
    }

    let rc = RefCell::new(23);
    let mut bmut = rc.borrow_mut();
    inner(&rc, &mut *bmut);
    drop(bmut);
    assert_eq!(*rc.borrow(), 23+12);
}

fn main() {
    lots_of_funny_borrows();
    aliasing_mut_and_shr();
}
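`aliasing_mut_and_shr` works because sharing is freeze-sensitive: `&*rc` freezes only the memory outside the `UnsafeCell`, so the aliasing `&mut` into the cell's interior survives. The same idea in a minimal standalone form (illustrative only, not one of the tests in this diff):

```rust
use std::cell::Cell;

fn main() {
    let c = Cell::new(1);
    let shr = &c; // sharing does not freeze the UnsafeCell's interior
    c.set(2);     // so interior mutation through another path stays legal
    assert_eq!(shr.get(), 2);
}
```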
@ -3,15 +3,20 @@ fn main() {
    deref_partially_dangling_raw();
    read_does_not_invalidate1();
    read_does_not_invalidate2();
    ref_raw_int_raw();
    mut_shr_raw();
    mut_raw_then_mut_shr();
    mut_raw_mut();
}

// Deref a raw ptr to access a field of a large struct, where the field
// is allocated but the entire struct is not.
// For now, we want to allow this.
fn deref_partially_dangling_raw() {
    let x = (1, 1);
    let x = (1, 13);
    let xptr = &x as *const _ as *const (i32, i32, i32);
    let _val = unsafe { (*xptr).1 };
    let val = unsafe { (*xptr).1 };
    assert_eq!(val, 13);
}

// Make sure that reading from an `&mut` does, like reborrowing to `&`,
@ -23,7 +28,7 @@ fn read_does_not_invalidate1() {
        let _val = x.1; // we just read, this does NOT invalidate the reborrows.
        ret
    }
    foo(&mut (1, 2));
    assert_eq!(*foo(&mut (1, 2)), 2);
}
// Same as above, but this time we first create a raw, then read from `&mut`
// and then freeze from the raw.
@ -34,5 +39,63 @@ fn read_does_not_invalidate2() {
        let ret = unsafe { &(*xraw).1 };
        ret
    }
    foo(&mut (1, 2));
    assert_eq!(*foo(&mut (1, 2)), 2);
}

// Just to make sure that casting a ref to raw, to int and back to raw
// and only then using it works. This rules out ideas like "do escape-to-raw lazily":
// After casting to int and back, we lost the tag that could have let us do that.
fn ref_raw_int_raw() {
    let mut x = 3;
    let xref = &mut x;
    let xraw = xref as *mut i32 as usize as *mut i32;
    assert_eq!(unsafe { *xraw }, 3);
}

// Creating a raw from a `&mut` through an `&` works, even if we
// write through that raw.
fn mut_shr_raw() {
    let mut x = 2;
    {
        let xref = &mut x;
        let xraw = &*xref as *const i32 as *mut i32;
        unsafe { *xraw = 4; }
    }
    assert_eq!(x, 4);
}

// Escape a mut to raw, then share the same mut and use the share, then the raw.
// That should work.
fn mut_raw_then_mut_shr() {
    let mut x = 2;
    {
        let xref = &mut x;
        let xraw = &mut *xref as *mut _;
        let xshr = &*xref;
        assert_eq!(*xshr, 2);
        unsafe { *xraw = 4; }
    }
    assert_eq!(x, 4);
}

// Ensure that if we derive a raw from a mut, and then a mut from that raw,
// and then read through the original mut, that does not invalidate the raw.
// This shows that the read-exception for `&mut` applies even if the `Shr` item
// on the stack is not at the top.
fn mut_raw_mut() {
    let mut x = 2;
    {
        let xref1 = &mut x;
        let xraw = xref1 as *mut _;
        let _xref2 = unsafe { &mut *xraw };
        let _val = *xref1;
        unsafe { *xraw = 4; }
        // we can now use both xraw and xref1, for reading
        assert_eq!(*xref1, 4);
        assert_eq!(unsafe { *xraw }, 4);
        assert_eq!(*xref1, 4);
        assert_eq!(unsafe { *xraw }, 4);
        // we cannot use xref2; see `compile-fail/stacked-borrows/illegal_read4.rs`
    }
    assert_eq!(x, 4);
}
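The last comment points at `compile-fail/stacked-borrows/illegal_read4.rs` for the failing counterpart, which is not shown in this diff. By the read-exception described above, the read through `xref1` keeps `Shr`/`Raw` items but pops the `Uniq` of `xref2`, so the test plausibly looks like this (a reconstruction, not the actual file):

```rust
fn main() {
    let mut x = 2;
    let xref1 = &mut x;
    let xraw = xref1 as *mut _;
    let xref2 = unsafe { &mut *xraw };
    let _val = *xref1; // pops xref2's Uniq off the stack (Raw survives)
    let _val = *xref2; // under Stacked Borrows: ERROR does not exist on the stack
}
```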
@ -1,34 +0,0 @@
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use std::sync::Arc;

fn rc_cell() -> Rc<Cell<i32>> {
    let r = Rc::new(Cell::new(42));
    let x = r.get();
    r.set(x + x);
    r
}

fn rc_refcell() -> i32 {
    let r = Rc::new(RefCell::new(42));
    *r.borrow_mut() += 10;
    let x = r.borrow();
    let y = r.borrow();
    (*x + *y)/2
}

fn arc() -> Arc<i32> {
    let a = Arc::new(42);
    a
}

fn true_assert() {
    assert_eq!(1, 1);
}

fn main() {
    assert_eq!(*arc(), 42);
    assert_eq!(rc_cell().get(), 84);
    assert_eq!(rc_refcell(), 52);
    true_assert();
}