use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;

use rustc::ty::{self, layout::Size};
use rustc::hir::{Mutability, MutMutable, MutImmutable};

use crate::{
    EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
    Pointer, Immediate, ImmTy, PlaceTy, MPlaceTy,
};

pub type Timestamp = u64;
pub type CallId = u64;
/// Information about which kind of borrow was used to create the reference this is tagged
/// with.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Borrow {
    /// A unique (mutable) reference.
    Uniq(Timestamp),
    /// A shared reference. This is also used by raw pointers, which do not track details
    /// of how or when they were created, hence the timestamp is optional.
    /// `Shr(Some(_))` does NOT mean that the destination of this reference is frozen;
    /// that depends on the type! Only those parts outside of an `UnsafeCell` are actually
    /// frozen.
    Shr(Option<Timestamp>),
}

impl Borrow {
    #[inline(always)]
    pub fn is_shared(self) -> bool {
        match self {
            Borrow::Shr(_) => true,
            _ => false,
        }
    }

    #[inline(always)]
    pub fn is_unique(self) -> bool {
        match self {
            Borrow::Uniq(_) => true,
            _ => false,
        }
    }
}

impl Default for Borrow {
    fn default() -> Self {
        Borrow::Shr(None)
    }
}
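
// A minimal illustrative sketch of the tag predicates above, written as an
// in-file unit test. The module name and cases are hypothetical additions,
// assuming the crate's test harness can build against its rustc internals.
#[cfg(test)]
mod borrow_tag_example {
    use super::*;

    #[test]
    fn tag_predicates() {
        // A unique (mutable) tag is unique, never shared.
        assert!(Borrow::Uniq(0).is_unique());
        assert!(!Borrow::Uniq(0).is_shared());
        // Shared tags are shared whether or not they carry a timestamp.
        assert!(Borrow::Shr(Some(1)).is_shared());
        assert!(Borrow::Shr(None).is_shared());
        // The default tag, used e.g. for untracked raw pointers, is `Shr(None)`.
        assert_eq!(Borrow::default(), Borrow::Shr(None));
    }
}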

/// An item in the per-location borrow stack
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum BorStackItem {
    /// Indicates the unique reference that may mutate.
    Uniq(Timestamp),
    /// Indicates that the location has been shared. Used for raw pointers, but
    /// also for shared references. The latter *additionally* get frozen
    /// when there is no `UnsafeCell`.
    Shr,
    /// A barrier, tracking the function it belongs to by its index on the call stack
    FnBarrier(CallId)
}

/// Extra per-location state
#[derive(Clone, Debug)]
pub struct Stack {
    borrows: Vec<BorStackItem>, // used as a stack; never empty
    frozen_since: Option<Timestamp>, // virtual frozen "item" on top of the stack
}

impl Stack {
    #[inline(always)]
    pub fn is_frozen(&self) -> bool {
        self.frozen_since.is_some()
    }
}

/// What kind of reference is being used?
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum RefKind {
    /// `&mut`
    Unique,
    /// `&` without interior mutability
    Frozen,
    /// `*` (raw pointer) or `&` to `UnsafeCell`
    Raw,
}

/// What kind of access is being performed?
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum AccessKind {
    Read,
    Write,
    Dealloc,
}

/// Extra global state in the memory, available to the memory access hooks
#[derive(Debug)]
pub struct BarrierTracking {
    next_id: CallId,
    active_calls: HashSet<CallId>,
}
pub type MemoryState = Rc<RefCell<BarrierTracking>>;

impl Default for BarrierTracking {
    fn default() -> Self {
        BarrierTracking {
            next_id: 0,
            active_calls: HashSet::default(),
        }
    }
}

impl BarrierTracking {
    pub fn new_call(&mut self) -> CallId {
        let id = self.next_id;
        trace!("new_call: Assigning ID {}", id);
        self.active_calls.insert(id);
        self.next_id += 1;
        id
    }

    pub fn end_call(&mut self, id: CallId) {
        assert!(self.active_calls.remove(&id));
    }

    fn is_active(&self, id: CallId) -> bool {
        self.active_calls.contains(&id)
    }
}
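
// A minimal illustrative sketch of the call-ID lifecycle above; the module
// name and scenario are hypothetical additions, not part of the interpreter.
#[cfg(test)]
mod barrier_tracking_example {
    use super::*;

    #[test]
    fn call_lifecycle() {
        let mut tracking = BarrierTracking::default();
        // Each function call gets a fresh ID that stays active until the call ends.
        let id = tracking.new_call();
        assert!(tracking.is_active(id));
        // Ending the call deactivates its barriers; IDs are never reused.
        tracking.end_call(id);
        assert!(!tracking.is_active(id));
        assert_ne!(tracking.new_call(), id);
    }
}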

/// Extra global machine state
#[derive(Clone, Debug)]
pub struct State {
    clock: Timestamp
}

impl Default for State {
    fn default() -> Self {
        State { clock: 0 }
    }
}

impl State {
    fn increment_clock(&mut self) -> Timestamp {
        let val = self.clock;
        self.clock = val + 1;
        val
    }
}
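
// A minimal illustrative sketch of the clock: `increment_clock` hands out
// every timestamp exactly once, starting at 0 (hypothetical test module).
#[cfg(test)]
mod clock_example {
    use super::*;

    #[test]
    fn clock_is_monotonic() {
        let mut state = State::default();
        assert_eq!(state.increment_clock(), 0);
        assert_eq!(state.increment_clock(), 1);
        assert_eq!(state.increment_clock(), 2);
    }
}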

/// Extra per-allocation state
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: RefCell<RangeMap<Stack>>,
    barrier_tracking: MemoryState,
}

/// Core per-location operations: deref, access, create.
/// We need to make at least the following things true:
///
/// U1: After creating a Uniq, it is at the top (+unfrozen).
/// U2: If the top is Uniq (+unfrozen), accesses must be through that Uniq or pop it.
/// U3: If an access (deref sufficient?) happens with a Uniq, it requires the Uniq to be in the stack.
///
/// F1: After creating a &, the parts outside `UnsafeCell` are frozen.
/// F2: If a write access happens, it unfreezes.
/// F3: If an access (well, a deref) happens with an & outside `UnsafeCell`, it requires the location to still be frozen.
impl<'tcx> Stack {
    /// Deref `bor`: check if the location is frozen and if the tag is in the stack.
    /// This does *not* constitute an access! "Deref" refers to the `*` operator
    /// in Rust, and includes cases like `&*x` or `(*x).foo` where no or only part
    /// of the memory actually gets accessed. Also we cannot know if we are
    /// going to read or write.
    /// Returns the index of the item we matched, `None` if it was the frozen one.
    /// `kind` indicates which kind of reference is being dereferenced.
    fn deref(
        &self,
        bor: Borrow,
        kind: RefKind,
    ) -> Result<Option<usize>, String> {
        // Exclude unique ref with frozen tag.
        if let (RefKind::Unique, Borrow::Shr(Some(_))) = (kind, bor) {
            return Err(format!("Encountered mutable reference with frozen tag ({:?})", bor));
        }
        // Checks related to freezing
        match bor {
            Borrow::Shr(Some(bor_t)) if kind == RefKind::Frozen => {
                // We need the location to be frozen. This ensures F3.
                let frozen = self.frozen_since.map_or(false, |itm_t| itm_t <= bor_t);
                return if frozen { Ok(None) } else {
                    Err(format!("Location is not frozen long enough"))
                }
            }
            Borrow::Shr(_) if self.frozen_since.is_some() => {
                return Ok(None) // Shared deref to frozen location; looking good
            }
            _ => {} // Not sufficient, go on looking.
        }
        // If we got here, we have to look for our item in the stack.
        for (idx, &itm) in self.borrows.iter().enumerate().rev() {
            match (itm, bor) {
                (BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
                    // Found matching unique item. This satisfies U3.
                    return Ok(Some(idx))
                }
                (BorStackItem::Shr, Borrow::Shr(_)) => {
                    // Found matching shared/raw item.
                    return Ok(Some(idx))
                }
                // Go on looking. We ignore barriers! When an `&mut` and an `&` alias,
                // dereferencing the `&` is still possible (to reborrow), but doing
                // an access is not.
                _ => {}
            }
        }
        // If we got here, we did not find our item. We have to error to satisfy U3.
        Err(format!("Borrow being dereferenced ({:?}) does not exist on the stack", bor))
    }

    /// Perform an actual memory access using `bor`. We do not know any types here
    /// or whether things should be frozen, but we *do* know if this is reading
    /// or writing.
    fn access(
        &mut self,
        bor: Borrow,
        kind: AccessKind,
        barrier_tracking: &BarrierTracking,
    ) -> EvalResult<'tcx> {
        // Check if we can match the frozen "item".
        // Not possible on writes!
        if self.is_frozen() {
            if kind == AccessKind::Read {
                // When we are frozen, we just accept all reads. No harm in this.
                // The deref already checked that `Uniq` items are in the stack, and that
                // the location is frozen if it should be.
                return Ok(());
            }
            trace!("access: Unfreezing");
        }
        // Unfreeze on writes. This ensures F2.
        self.frozen_since = None;
        // Pop the stack until we have something matching.
        while let Some(&itm) = self.borrows.last() {
            match (itm, bor) {
                (BorStackItem::FnBarrier(call), _) if barrier_tracking.is_active(call) => {
                    return err!(MachineError(format!(
                        "Stopping looking for borrow being accessed ({:?}) because of barrier ({})",
                        bor, call
                    )))
                }
                (BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
                    // Found matching unique item. Continue after the match.
                }
                (BorStackItem::Shr, _) if kind == AccessKind::Read => {
                    // When reading, everything can use a shared item!
                    // We do not want to do this when writing: Writing to an `&mut`
                    // should reaffirm its exclusivity (i.e., make sure it is
                    // on top of the stack). Continue after the match.
                }
                (BorStackItem::Shr, Borrow::Shr(_)) => {
                    // Found matching shared item. Continue after the match.
                }
                _ => {
                    // Pop this, go on. This ensures U2.
                    let itm = self.borrows.pop().unwrap();
                    trace!("access: Popping {:?}", itm);
                    continue
                }
            }
            // If we got here, we found a matching item. Congratulations!
            // However, we are not done yet: If this access is deallocating, we must make sure
            // there are no active barriers remaining on the stack.
            if kind == AccessKind::Dealloc {
                for &itm in self.borrows.iter().rev() {
                    match itm {
                        BorStackItem::FnBarrier(call) if barrier_tracking.is_active(call) => {
                            return err!(MachineError(format!(
                                "Deallocating with active barrier ({})", call
                            )))
                        }
                        _ => {},
                    }
                }
            }
            // NOW we are done.
            return Ok(())
        }
        // If we got here, we did not find our item.
        err!(MachineError(format!(
            "Borrow being accessed ({:?}) does not exist on the stack",
            bor
        )))
    }

    /// Initiate `bor`; mostly this means pushing.
    /// This operation cannot fail; it is up to the caller to ensure that the precondition
    /// is met: We cannot push `Uniq` onto frozen stacks.
    /// `kind` indicates which kind of reference is being created.
    fn create(&mut self, bor: Borrow, kind: RefKind) {
        if self.frozen_since.is_some() {
            // A frozen location? Possible if we create a barrier, then push again.
            assert!(bor.is_shared(), "We should never try creating a unique borrow for a frozen stack");
            trace!("create: Not doing anything on frozen location");
            return;
        }
        // First, push. We do this even if we will later freeze, because we
        // will allow mutation of shared data at the expense of unfreezing.
        let itm = match bor {
            Borrow::Uniq(t) => BorStackItem::Uniq(t),
            Borrow::Shr(_) => BorStackItem::Shr,
        };
        if *self.borrows.last().unwrap() == itm {
            // This is just an optimization, no functional change: Avoid stacking
            // multiple `Shr` on top of each other.
            assert!(bor.is_shared());
            trace!("create: Sharing a shared location is a NOP");
        } else {
            // This ensures U1.
            trace!("create: Pushing {:?}", itm);
            self.borrows.push(itm);
        }
        // Then, maybe freeze. This is part 2 of ensuring F1.
        if kind == RefKind::Frozen {
            let bor_t = match bor {
                Borrow::Shr(Some(t)) => t,
                _ => bug!("Creating illegal borrow {:?} for frozen ref", bor),
            };
            trace!("create: Freezing");
            self.frozen_since = Some(bor_t);
        }
    }

    /// Add a barrier
    fn barrier(&mut self, call: CallId) {
        let itm = BorStackItem::FnBarrier(call);
        if *self.borrows.last().unwrap() == itm {
            // This is just an optimization, no functional change: Avoid stacking
            // multiple identical barriers on top of each other.
            // This can happen when a function receives several shared references
            // that overlap.
            trace!("barrier: Avoiding redundant extra barrier");
        } else {
            trace!("barrier: Pushing barrier for call {}", call);
            self.borrows.push(itm);
        }
    }
}
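
// A minimal illustrative sketch of the invariants above (U1, U3, F1, F3),
// driving `create` and `deref` directly on a `Stack`. The module name and
// scenarios are hypothetical additions.
#[cfg(test)]
mod stack_ops_example {
    use super::*;

    fn fresh_stack() -> Stack {
        // Every allocation starts with a single `Shr` item (see `memory_allocated`).
        Stack { borrows: vec![BorStackItem::Shr], frozen_since: None }
    }

    #[test]
    fn unique_borrow_is_pushed_and_found() {
        let mut stack = fresh_stack();
        // U1: after creating a `Uniq`, it sits on top and the stack is unfrozen.
        stack.create(Borrow::Uniq(1), RefKind::Unique);
        assert!(!stack.is_frozen());
        // U3: dereferencing that `Uniq` finds it on the stack...
        assert_eq!(stack.deref(Borrow::Uniq(1), RefKind::Unique), Ok(Some(1)));
        // ...while a tag that was never created is rejected.
        assert!(stack.deref(Borrow::Uniq(2), RefKind::Unique).is_err());
    }

    #[test]
    fn freezing_is_timestamp_sensitive() {
        let mut stack = fresh_stack();
        // F1: creating a frozen shared reference freezes the location...
        stack.create(Borrow::Shr(Some(5)), RefKind::Frozen);
        assert!(stack.is_frozen());
        // ...and F3: only tags created no earlier than the freeze may deref it.
        assert_eq!(stack.deref(Borrow::Shr(Some(7)), RefKind::Frozen), Ok(None));
        assert!(stack.deref(Borrow::Shr(Some(3)), RefKind::Frozen).is_err());
    }
}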

/// Higher-level per-location operations: deref, access, reborrow.
impl<'tcx> Stacks {
    /// Check that this stack is fine with being dereferenced
    fn deref(
        &self,
        ptr: Pointer<Borrow>,
        size: Size,
        kind: RefKind,
    ) -> EvalResult<'tcx> {
        trace!("deref for tag {:?} as {:?}: {:?}, size {}",
            ptr.tag, kind, ptr, size.bytes());
        let stacks = self.stacks.borrow();
        for stack in stacks.iter(ptr.offset, size) {
            stack.deref(ptr.tag, kind).map_err(EvalErrorKind::MachineError)?;
        }
        Ok(())
    }

    /// `ptr` got used, reflect that in the stack.
    fn access(
        &self,
        ptr: Pointer<Borrow>,
        size: Size,
        kind: AccessKind,
    ) -> EvalResult<'tcx> {
        trace!("{:?} access of tag {:?}: {:?}, size {}", kind, ptr.tag, ptr, size.bytes());
        // Even reads can have a side-effect, by invalidating other references.
        // This is fundamentally necessary since `&mut` asserts that there
        // are no accesses through other references, not even reads.
        let barrier_tracking = self.barrier_tracking.borrow();
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut(ptr.offset, size) {
            stack.access(ptr.tag, kind, &*barrier_tracking)?;
        }
        Ok(())
    }

    /// Reborrow the given pointer to the new tag for the given kind of reference.
    /// This works on `&self` because we might encounter references to constant memory.
    fn reborrow(
        &self,
        ptr: Pointer<Borrow>,
        size: Size,
        mut barrier: Option<CallId>,
        new_bor: Borrow,
        new_kind: RefKind,
    ) -> EvalResult<'tcx> {
        assert_eq!(new_bor.is_unique(), new_kind == RefKind::Unique);
        trace!("reborrow for tag {:?} to {:?} as {:?}: {:?}, size {}",
            ptr.tag, new_bor, new_kind, ptr, size.bytes());
        if new_kind == RefKind::Raw {
            // No barrier for raw, including `&UnsafeCell`. They can rightfully
            // alias with `&mut`.
            // FIXME: This means that the `dereferenceable` attribute on non-frozen shared
            // references is incorrect! They are dereferenceable when the function is
            // called, but might become non-dereferenceable during the course of execution.
            // Also see [1], [2].
            //
            // [1]: <https://internals.rust-lang.org/t/
            //      is-it-possible-to-be-memory-safe-with-deallocated-self/8457/8>,
            // [2]: <https://lists.llvm.org/pipermail/llvm-dev/2018-July/124555.html>
            barrier = None;
        }
        let barrier_tracking = self.barrier_tracking.borrow();
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut(ptr.offset, size) {
            // Access source `ptr`, create new ref.
            let ptr_idx = stack.deref(ptr.tag, new_kind).map_err(EvalErrorKind::MachineError)?;
            // If we can deref the new tag already, and if that tag lives higher on
            // the stack than the one we come from, just use that.
            // IOW, we check if `new_bor` *already* is "derived from" `ptr.tag`.
            // This also checks frozenness, if required.
            let bor_redundant = barrier.is_none() &&
                match (ptr_idx, stack.deref(new_bor, new_kind)) {
                    // If the new borrow works with the frozen item, or else if it lives
                    // above the old one in the stack, our job here is done.
                    (_, Ok(None)) => true,
                    (Some(ptr_idx), Ok(Some(new_idx))) if new_idx >= ptr_idx => true,
                    // Otherwise we need to create a new borrow.
                    _ => false,
                };
            if bor_redundant {
                assert!(new_bor.is_shared(), "A unique reborrow can never be redundant");
                trace!("reborrow is redundant");
                continue;
            }
            // We need to do some actual work.
            let access_kind = if new_kind == RefKind::Unique {
                AccessKind::Write
            } else {
                AccessKind::Read
            };
            stack.access(ptr.tag, access_kind, &*barrier_tracking)?;
            if let Some(call) = barrier {
                stack.barrier(call);
            }
            stack.create(new_bor, new_kind);
        }
        Ok(())
    }
}
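
// A minimal illustrative sketch of why even reads are side-effecting: an
// access through an older shared item pops (invalidates) the `Uniq` above it,
// which is exactly invariant U2. Hypothetical test module, added for clarity.
#[cfg(test)]
mod read_invalidation_example {
    use super::*;

    #[test]
    fn foreign_read_pops_unique() {
        let mut stack = Stack { borrows: vec![BorStackItem::Shr], frozen_since: None };
        let tracking = BarrierTracking::default();
        stack.create(Borrow::Uniq(1), RefKind::Unique);
        // Reading through the older shared item pops the `Uniq` on top of it...
        assert!(stack.access(Borrow::Shr(None), AccessKind::Read, &tracking).is_ok());
        // ...so the unique tag can no longer be dereferenced.
        assert!(stack.deref(Borrow::Uniq(1), RefKind::Unique).is_err());
    }
}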

/// Hooks and glue
impl AllocationExtra<Borrow, MemoryState> for Stacks {
    #[inline(always)]
    fn memory_allocated<'tcx>(size: Size, extra: &MemoryState) -> Self {
        let stack = Stack {
            borrows: vec![BorStackItem::Shr],
            frozen_since: None,
        };
        Stacks {
            stacks: RefCell::new(RangeMap::new(size, stack)),
            barrier_tracking: Rc::clone(extra),
        }
    }

    #[inline(always)]
    fn memory_read<'tcx>(
        alloc: &Allocation<Borrow, Stacks>,
        ptr: Pointer<Borrow>,
        size: Size,
    ) -> EvalResult<'tcx> {
        alloc.extra.access(ptr, size, AccessKind::Read)
    }

    #[inline(always)]
    fn memory_written<'tcx>(
        alloc: &mut Allocation<Borrow, Stacks>,
        ptr: Pointer<Borrow>,
        size: Size,
    ) -> EvalResult<'tcx> {
        alloc.extra.access(ptr, size, AccessKind::Write)
    }

    #[inline(always)]
    fn memory_deallocated<'tcx>(
        alloc: &mut Allocation<Borrow, Stacks>,
        ptr: Pointer<Borrow>,
        size: Size,
    ) -> EvalResult<'tcx> {
        alloc.extra.access(ptr, size, AccessKind::Dealloc)
    }
}

impl<'tcx> Stacks {
    /// Pushes the first item to the stacks.
    pub(crate) fn first_item(
        &mut self,
        itm: BorStackItem,
        size: Size
    ) {
        for stack in self.stacks.get_mut().iter_mut(Size::ZERO, size) {
            assert!(stack.borrows.len() == 1);
            assert_eq!(stack.borrows.pop().unwrap(), BorStackItem::Shr);
            stack.borrows.push(itm);
        }
    }
}

pub trait EvalContextExt<'tcx> {
    fn ptr_dereference(
        &self,
        place: MPlaceTy<'tcx, Borrow>,
        size: Size,
        mutability: Option<Mutability>,
    ) -> EvalResult<'tcx>;

    fn tag_new_allocation(
        &mut self,
        id: AllocId,
        kind: MemoryKind<MiriMemoryKind>,
    ) -> Borrow;

    /// Reborrow the given place, updating the stacks for the new borrow.
    fn reborrow(
        &mut self,
        place: MPlaceTy<'tcx, Borrow>,
        size: Size,
        fn_barrier: bool,
        new_bor: Borrow
    ) -> EvalResult<'tcx>;

    /// Retag an individual pointer, returning the retagged version.
    fn retag_reference(
        &mut self,
        ptr: ImmTy<'tcx, Borrow>,
        mutbl: Mutability,
        fn_barrier: bool,
        two_phase: bool,
    ) -> EvalResult<'tcx, Immediate<Borrow>>;

    fn retag(
        &mut self,
        fn_entry: bool,
        two_phase: bool,
        place: PlaceTy<'tcx, Borrow>
    ) -> EvalResult<'tcx>;

    fn escape_to_raw(
        &mut self,
        place: MPlaceTy<'tcx, Borrow>,
        size: Size,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
    fn tag_new_allocation(
        &mut self,
        id: AllocId,
        kind: MemoryKind<MiriMemoryKind>,
    ) -> Borrow {
        let time = match kind {
            MemoryKind::Stack => {
                // New unique borrow. This `Uniq` is not accessible by the program,
                // so it will only ever be used when using the local directly (i.e.,
                // not through a pointer). IOW, whenever we directly use a local this will pop
                // everything else off the stack, invalidating all previous pointers
                // and, in particular, *all* raw pointers. This subsumes the explicit
                // `reset` which the blog post [1] says to perform when accessing a local.
                //
                // [1]: https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html
                self.machine.stacked_borrows.increment_clock()
            }
            _ => {
                // Nothing to do for everything else
                return Borrow::default()
            }
        };
        // Make this the active borrow for this allocation
        let alloc = self.memory_mut().get_mut(id).expect("This is a new allocation, it must still exist");
        let size = Size::from_bytes(alloc.bytes.len() as u64);
        alloc.extra.first_item(BorStackItem::Uniq(time), size);
        Borrow::Uniq(time)
    }

    /// Called for value-to-place conversion. `mutability` is `None` for raw pointers.
    ///
    /// Note that this does NOT mean that all this memory will actually get accessed/referenced!
    /// We could be in the middle of `&(*var).1`.
    fn ptr_dereference(
        &self,
        place: MPlaceTy<'tcx, Borrow>,
        size: Size,
        mutability: Option<Mutability>,
    ) -> EvalResult<'tcx> {
        trace!("ptr_dereference: Accessing {} reference for {:?} (pointee {})",
            if let Some(mutability) = mutability { format!("{:?}", mutability) } else { format!("raw") },
            place.ptr, place.layout.ty);
        let ptr = place.ptr.to_ptr()?;
        if mutability.is_none() {
            // No further checks on raw derefs -- only the access itself will be checked.
            return Ok(());
        }

        // Get the allocation
        let alloc = self.memory().get(ptr.alloc_id)?;
        alloc.check_bounds(self, ptr, size)?;
        // If we got here, we do some checking, *but* we leave the tag unchanged.
        if let Borrow::Shr(Some(_)) = ptr.tag {
            assert_eq!(mutability, Some(MutImmutable));
            // We need a frozen-sensitive check
            self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
                let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
                alloc.extra.deref(cur_ptr, size, kind)
            })?;
        } else {
            // Just treat this as one big chunk
            let kind = if mutability == Some(MutMutable) { RefKind::Unique } else { RefKind::Raw };
            alloc.extra.deref(ptr, size, kind)?;
        }

        // All is good
        Ok(())
    }

    /// The given place may henceforth be accessed through raw pointers.
    #[inline(always)]
    fn escape_to_raw(
        &mut self,
        place: MPlaceTy<'tcx, Borrow>,
        size: Size,
    ) -> EvalResult<'tcx> {
        self.reborrow(place, size, /*fn_barrier*/ false, Borrow::default())?;
        Ok(())
    }

    fn reborrow(
        &mut self,
        place: MPlaceTy<'tcx, Borrow>,
        size: Size,
        fn_barrier: bool,
        new_bor: Borrow
    ) -> EvalResult<'tcx> {
        let ptr = place.ptr.to_ptr()?;
        let barrier = if fn_barrier { Some(self.frame().extra) } else { None };
        trace!("reborrow: Creating new reference for {:?} (pointee {}): {:?}",
            ptr, place.layout.ty, new_bor);

        // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
        let alloc = self.memory().get(ptr.alloc_id)?;
        alloc.check_bounds(self, ptr, size)?;
        // Update the stacks.
        if let Borrow::Shr(Some(_)) = new_bor {
            // Reference that cares about freezing. We need a frozen-sensitive reborrow.
            self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
                let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
                alloc.extra.reborrow(cur_ptr, size, barrier, new_bor, kind)
            })?;
        } else {
            // Just treat this as one big chunk.
            let kind = if new_bor.is_unique() { RefKind::Unique } else { RefKind::Raw };
            alloc.extra.reborrow(ptr, size, barrier, new_bor, kind)?;
        }
        Ok(())
    }

    fn retag_reference(
        &mut self,
        val: ImmTy<'tcx, Borrow>,
        mutbl: Mutability,
        fn_barrier: bool,
        two_phase: bool,
    ) -> EvalResult<'tcx, Immediate<Borrow>> {
        // We want a place for where the ptr *points to*, so we get one.
        let place = self.ref_to_mplace(val)?;
        let size = self.size_and_align_of_mplace(place)?
            .map(|(size, _)| size)
            .unwrap_or_else(|| place.layout.size);
        if size == Size::ZERO {
            // Nothing to do for ZSTs.
            return Ok(*val);
        }

        // Compute new borrow.
        let time = self.machine.stacked_borrows.increment_clock();
        let new_bor = match mutbl {
            MutMutable => Borrow::Uniq(time),
            MutImmutable => Borrow::Shr(Some(time)),
        };

        // Reborrow.
        self.reborrow(place, size, fn_barrier, new_bor)?;
        let new_place = place.with_tag(new_bor);
        // Handle two-phase borrows.
        if two_phase {
            // We immediately share it, to allow read accesses
            let two_phase_time = self.machine.stacked_borrows.increment_clock();
            let two_phase_bor = Borrow::Shr(Some(two_phase_time));
            self.reborrow(new_place, size, /*fn_barrier*/false, two_phase_bor)?;
        }

        // Return new ptr.
        Ok(new_place.to_ref())
    }

    fn retag(
        &mut self,
        fn_entry: bool,
        two_phase: bool,
        place: PlaceTy<'tcx, Borrow>
    ) -> EvalResult<'tcx> {
        // Determine mutability and whether to add a barrier.
        // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
        // making it useless.
        fn qualify(ty: ty::Ty<'_>, fn_entry: bool) -> Option<(Mutability, bool)> {
            match ty.sty {
                // References are simple
                ty::Ref(_, _, mutbl) => Some((mutbl, fn_entry)),
                // Boxes do not get a barrier: Barriers reflect that references outlive the call
                // they were passed in to; that's just not the case for boxes.
                ty::Adt(..) if ty.is_box() => Some((MutMutable, false)),
                _ => None,
            }
        }

        // We need a visitor to visit all references. However, that requires
        // a `MemPlace`, so we have a fast path for reference types that
        // avoids allocating.
        if let Some((mutbl, barrier)) = qualify(place.layout.ty, fn_entry) {
            // fast path
            let val = self.read_immediate(self.place_to_op(place)?)?;
            let val = self.retag_reference(val, mutbl, barrier, two_phase)?;
            self.write_immediate(val, place)?;
            return Ok(());
        }
        let place = self.force_allocation(place)?;

        let mut visitor = RetagVisitor { ecx: self, fn_entry, two_phase };
        visitor.visit_value(place)?;

        // The actual visitor
        struct RetagVisitor<'ecx, 'a, 'mir, 'tcx> {
            ecx: &'ecx mut MiriEvalContext<'a, 'mir, 'tcx>,
            fn_entry: bool,
            two_phase: bool,
        }
        impl<'ecx, 'a, 'mir, 'tcx>
            MutValueVisitor<'a, 'mir, 'tcx, Evaluator<'tcx>>
        for
            RetagVisitor<'ecx, 'a, 'mir, 'tcx>
        {
            type V = MPlaceTy<'tcx, Borrow>;

            #[inline(always)]
            fn ecx(&mut self) -> &mut MiriEvalContext<'a, 'mir, 'tcx> {
                &mut self.ecx
            }

            // Primitives of reference type, that is the one thing we are interested in.
            fn visit_primitive(&mut self, place: MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
            {
                // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
                // making it useless.
                if let Some((mutbl, barrier)) = qualify(place.layout.ty, self.fn_entry) {
                    let val = self.ecx.read_immediate(place.into())?;
                    let val = self.ecx.retag_reference(val, mutbl, barrier, self.two_phase)?;
                    self.ecx.write_immediate(val, place.into())?;
                }
                Ok(())
            }
        }

        Ok(())
    }
}