Merge pull request #288 from RalfJung/mir-validate

Re-do memory locking
Oliver Schneider 2017-08-07 22:44:21 +02:00 committed by GitHub
commit 547f49ce8c
6 changed files with 386 additions and 164 deletions


@@ -5,7 +5,7 @@ use rustc::mir;
use rustc::ty::{FnSig, Ty, layout};
use super::{
MemoryPointer, LockInfo, AccessKind
MemoryPointer, Lock, AccessKind
};
use rustc_const_math::ConstMathErr;
@@ -84,23 +84,23 @@ pub enum EvalErrorKind<'tcx> {
len: u64,
frame: usize,
access: AccessKind,
lock: LockInfo,
lock: Lock,
},
MemoryAcquireConflict {
ptr: MemoryPointer,
len: u64,
kind: AccessKind,
lock: LockInfo,
lock: Lock,
},
InvalidMemoryLockRelease {
ptr: MemoryPointer,
len: u64,
frame: usize,
lock: LockInfo,
lock: Lock,
},
DeallocatedLockedMemory {
ptr: MemoryPointer,
lock: LockInfo,
lock: Lock,
},
ValidationFailure(String),
CalledClosureAsFunction,


@@ -1,6 +1,6 @@
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
use std::{fmt, iter, ptr, mem, io, ops};
use std::{fmt, iter, ptr, mem, io};
use std::cell::Cell;
use rustc::ty;
@@ -13,66 +13,13 @@ use super::{
PrimVal, Pointer,
EvalContext, DynamicLifetime,
Machine,
RangeMap,
};
////////////////////////////////////////////////////////////////////////////////
// Locks
////////////////////////////////////////////////////////////////////////////////
mod range {
use super::*;
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
// by the second field.
// This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all
// `MemoryRange`s whose `start` is <= the one we're looking for, but not > the end of the range we're checking.
// At the same time, the `end` is irrelevant for the sorting and range searching, but used for the check.
// This kind of search breaks if `end < start`, so don't do that!
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct MemoryRange {
start: u64,
end: u64,
}
impl MemoryRange {
pub fn new(offset: u64, len: u64) -> MemoryRange {
assert!(len > 0);
MemoryRange {
start: offset,
end: offset + len,
}
}
pub fn range(offset: u64, len: u64) -> ops::Range<MemoryRange> {
assert!(len > 0);
// We select all elements that are within
// the range given by the offset into the allocation and the length.
// This is sound if "self.contains() || self.overlaps() == true" implies that self is in-range.
let left = MemoryRange {
start: 0,
end: offset,
};
let right = MemoryRange {
start: offset + len + 1,
end: 0,
};
left..right
}
pub fn contained_in(&self, offset: u64, len: u64) -> bool {
assert!(len > 0);
offset <= self.start && self.end <= (offset + len)
}
pub fn overlaps(&self, offset: u64, len: u64) -> bool {
assert!(len > 0);
//let non_overlap = (offset + len) <= self.start || self.end <= offset;
(offset + len) > self.start && self.end > offset
}
}
}
use self::range::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AccessKind {
Read,
@@ -81,18 +28,51 @@ pub enum AccessKind {
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
pub enum LockInfo {
struct LockInfo {
suspended: Vec<SuspendedWriteLock>,
active: Lock,
}
#[derive(Clone, Debug)]
struct SuspendedWriteLock {
/// Original lifetime of the lock that is now suspended
lft: DynamicLifetime,
/// Regions that all have to end to reenable this suspension
suspensions: Vec<CodeExtent>,
}
#[derive(Clone, Debug)]
pub enum Lock {
NoLock,
WriteLock(DynamicLifetime),
ReadLock(Vec<DynamicLifetime>), // This should never be empty -- that would be a read lock held and nobody there to release it...
}
use self::LockInfo::*;
use self::Lock::*;
impl Default for LockInfo {
fn default() -> Self {
LockInfo::new(NoLock)
}
}
impl LockInfo {
fn new(lock: Lock) -> LockInfo {
LockInfo { suspended: Vec::new(), active: lock }
}
fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
use self::AccessKind::*;
match (self, access) {
(&ReadLock(_), Read) => true, // Read access to read-locked region is okay, no matter who's holding the read lock.
(&WriteLock(ref lft), _) if Some(lft.frame) == frame => true, // All access is okay when we hold the write lock.
match (&self.active, access) {
(&NoLock, _) => true,
(&ReadLock(ref lfts), Read) => {
assert!(!lfts.is_empty(), "Someone left an empty read lock behind.");
// Read access to read-locked region is okay, no matter who's holding the read lock.
true
},
(&WriteLock(ref lft), _) => {
// All access is okay if we are the ones holding it
Some(lft.frame) == frame
},
_ => false, // Nothing else is okay.
}
}
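To make these rules concrete, here is a minimal standalone sketch of the same access check, with plain usize frame indices standing in for DynamicLifetime and none of Miri's real types; it illustrates the match above rather than the crate's actual API.

enum AccessKind { Read, Write }

// Simplified stand-in for the `Lock` enum above: frames are plain indices.
enum Lock { NoLock, ReadLock(Vec<usize>), WriteLock(usize) }

// Mirrors `LockInfo::access_permitted`: unlocked memory permits everything, a read
// lock permits reads from anyone, a write lock only permits its owning frame.
fn access_permitted(lock: &Lock, frame: Option<usize>, access: AccessKind) -> bool {
    match (lock, access) {
        (Lock::NoLock, _) => true,
        (Lock::ReadLock(_), AccessKind::Read) => true,
        (Lock::WriteLock(owner), _) => Some(*owner) == frame,
        _ => false, // a write to read-locked memory is rejected
    }
}

fn main() {
    assert!(access_permitted(&Lock::NoLock, None, AccessKind::Write));
    assert!(access_permitted(&Lock::ReadLock(vec![0]), Some(1), AccessKind::Read));
    assert!(!access_permitted(&Lock::ReadLock(vec![0]), Some(1), AccessKind::Write));
    assert!(access_permitted(&Lock::WriteLock(0), Some(0), AccessKind::Write));
    assert!(!access_permitted(&Lock::WriteLock(0), None, AccessKind::Write)); // `frame: None` is how acquire_lock probes for conflicts
}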
@@ -130,25 +110,15 @@ pub struct Allocation<M> {
/// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
pub kind: Kind<M>,
/// Memory regions that are locked by some function
locks: BTreeMap<MemoryRange, LockInfo>,
locks: RangeMap<LockInfo>,
}
impl<M> Allocation<M> {
fn iter_locks<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a LockInfo)> + 'a {
self.locks.range(MemoryRange::range(offset, len))
.filter(move |&(range, _)| range.overlaps(offset, len))
}
fn iter_locks_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a mut LockInfo)> + 'a {
self.locks.range_mut(MemoryRange::range(offset, len))
.filter(move |&(range, _)| range.overlaps(offset, len))
}
fn check_locks<'tcx>(&self, frame: Option<usize>, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> {
if len == 0 {
return Ok(())
}
for (_, lock) in self.iter_locks(offset, len) {
for lock in self.locks.iter(offset, len) {
// Check if the lock is in conflict with the access.
if !lock.access_permitted(frame, access) {
return Err(lock.clone());
@@ -328,7 +298,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
align,
kind,
mutable: Mutability::Mutable,
locks: BTreeMap::new(),
locks: RangeMap::new(),
};
let id = self.next_id;
self.next_id.0 += 1;
@@ -385,7 +355,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
// lock by another frame. We *have* to permit deallocation if we hold a read lock.
// TODO: Figure out the exact rules here.
alloc.check_locks(Some(self.cur_frame), 0, alloc.bytes.len() as u64, AccessKind::Read)
.map_err(|lock| EvalErrorKind::DeallocatedLockedMemory { ptr, lock })?;
.map_err(|lock| EvalErrorKind::DeallocatedLockedMemory { ptr, lock: lock.active })?;
if alloc.kind != kind {
return err!(DeallocatedWrongMemoryKind(format!("{:?}", alloc.kind), format!("{:?}", kind)));
@@ -465,70 +435,146 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
let alloc = self.get(ptr.alloc_id)?;
let frame = self.cur_frame;
alloc.check_locks(Some(frame), ptr.offset, len, access)
.map_err(|lock| EvalErrorKind::MemoryLockViolation { ptr, len, frame, access, lock }.into())
.map_err(|lock| EvalErrorKind::MemoryLockViolation { ptr, len, frame, access, lock: lock.active }.into())
}
/// Acquire the lock for the given lifetime
pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Option<CodeExtent>, kind: AccessKind) -> EvalResult<'tcx> {
use std::collections::btree_map::Entry::*;
let frame = self.cur_frame;
assert!(len > 0);
trace!("Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}", frame, kind, ptr, len, region);
self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
// Check if this conflicts with other locks
alloc.check_locks(None, ptr.offset, len, kind)
.map_err(|lock| EvalErrorKind::MemoryAcquireConflict { ptr, len, kind, lock })?;
// Iterate over our range and acquire the lock. If the range is already split into pieces,
// we have to manipulate all of them.
let lifetime = DynamicLifetime { frame, region };
match (alloc.locks.entry(MemoryRange::new(ptr.offset, len)), kind) {
(Vacant(entry), AccessKind::Read) => { entry.insert(ReadLock(vec![lifetime])); },
(Vacant(entry), AccessKind::Write) => { entry.insert(WriteLock(lifetime)); },
(Occupied(mut entry), AccessKind::Read) =>
match *entry.get_mut() {
ReadLock(ref mut lifetimes) => lifetimes.push(lifetime),
WriteLock(_) => bug!("We already checked that there is no conflicting write lock"),
},
(Occupied(_), AccessKind::Write) => bug!("We already checked that there is no conflicting lock"),
for lock in alloc.locks.iter_mut(ptr.offset, len) {
if !lock.access_permitted(None, kind) {
return err!(MemoryAcquireConflict { ptr, len, kind, lock: lock.active.clone() });
}
// See what we have to do
match (&mut lock.active, kind) {
(active @ &mut NoLock, AccessKind::Write) => {
*active = WriteLock(lifetime);
}
(active @ &mut NoLock, AccessKind::Read) => {
*active = ReadLock(vec![lifetime]);
}
(&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
lifetimes.push(lifetime);
}
_ => bug!("We already checked that there is no conflicting lock"),
}
};
Ok(())
}
/// Release a write lock prematurely. If there's a read lock or someone else's lock, fail.
pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64) -> EvalResult<'tcx> {
/// Release or suspend a write lock of the given lifetime prematurely.
/// When releasing, if there is no write lock or someone else's write lock, that's an error.
/// When suspending, the same cases are fine; we just register an additional suspension.
pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64,
lock_region: Option<CodeExtent>, suspend: Option<CodeExtent>) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let lock_lft = DynamicLifetime { frame: cur_frame, region: lock_region };
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
let mut remove_list : Vec<MemoryRange> = Vec::new();
for (range, lock) in alloc.iter_locks_mut(ptr.offset, len) {
match *lock {
WriteLock(ref lft) => {
// Make sure we can release this lock
if lft.frame != cur_frame {
return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.clone() });
}
if !range.contained_in(ptr.offset, len) {
return err!(Unimplemented(format!("miri does not support releasing part of a write-locked region")));
}
// Release it later. We cannot do this now.
remove_list.push(*range);
'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) {
trace!("Releasing {:?}", lock);
let is_our_lock = match lock.active {
WriteLock(lft) => {
lft == lock_lft
}
ReadLock(_) | NoLock => {
false
}
};
if is_our_lock {
// Disable the lock
lock.active = NoLock;
}
match suspend {
Some(suspend_region) => {
if is_our_lock {
// We just released this lock, so add a new suspension
lock.suspended.push(SuspendedWriteLock { lft: lock_lft, suspensions: vec![suspend_region] });
} else {
// Find our lock in the suspended ones
for suspended_lock in lock.suspended.iter_mut().rev() {
if suspended_lock.lft == lock_lft {
// Found it!
suspended_lock.suspensions.push(suspend_region);
continue 'locks;
}
}
// We did not find it. Someone else had the lock and we have not suspended it; that's just wrong.
return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.active.clone() });
}
}
None => {
// If we do not suspend, make sure we actually released something
if !is_our_lock {
return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.active.clone() });
}
}
ReadLock(_) => {
// Abort here and bubble the error outwards so that we do not even register a suspension.
return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.clone() });
},
}
}
for range in remove_list {
trace!("Releasing {:?}", alloc.locks[&range]);
alloc.locks.remove(&range);
}
Ok(())
}
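The per-lock branching above reduces to a small decision table. The sketch below is a hypothetical condensation (plain booleans instead of the real lock, lifetime, and region types), not the function itself, but it captures which combinations release, suspend, or error.

// Hypothetical condensation of release_write_lock's per-lock branching.
#[derive(Debug, PartialEq)]
enum Outcome { Released, Suspended, SuspendedAgain, Error }

fn decide(is_our_lock: bool, suspending: bool, already_suspended_by_us: bool) -> Outcome {
    match (is_our_lock, suspending) {
        // We held the write lock and were asked to plainly release it.
        (true, false) => Outcome::Released,
        // We held it and a suspension region was given: drop it, remember to re-acquire later.
        (true, true) => Outcome::Suspended,
        // We did not hold it, but we had already suspended it: record one more suspension.
        (false, true) if already_suspended_by_us => Outcome::SuspendedAgain,
        // Anything else (touching a lock that is not ours and was never suspended by us) is an error.
        _ => Outcome::Error,
    }
}

fn main() {
    assert_eq!(decide(true, false, false), Outcome::Released);
    assert_eq!(decide(true, true, false), Outcome::Suspended);
    assert_eq!(decide(false, true, true), Outcome::SuspendedAgain);
    assert_eq!(decide(false, false, false), Outcome::Error);
}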
// TODO: Test that we actually released a write lock for the entire covered region.
/// Release a suspension from the write lock. If this is the last suspension or if there is no suspension, acquire the lock.
pub(crate) fn recover_write_lock(&mut self, ptr: MemoryPointer, len: u64,
lock_region: Option<CodeExtent>, suspended_region: CodeExtent, )
-> EvalResult<'tcx>
{
assert!(len > 0);
let cur_frame = self.cur_frame;
let lock_lft = DynamicLifetime { frame: cur_frame, region: lock_region };
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
for lock in alloc.locks.iter_mut(ptr.offset, len) {
// If we have a suspension here, it will be the topmost one
let (got_the_lock, pop_suspension) = match lock.suspended.last_mut() {
None => (true, false),
Some(suspended_lock) => {
if suspended_lock.lft == lock_lft {
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
let idx = match suspended_lock.suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
None => // TODO: Can the user trigger this?
bug!("We have this lock suspended, but not for the given region."),
Some((idx, _)) => idx
};
suspended_lock.suspensions.remove(idx);
let got_lock = suspended_lock.suspensions.is_empty();
(got_lock, got_lock)
} else {
// Someone else's suspension up top, we should be able to grab the lock
(true, false)
}
}
};
if pop_suspension { // with NLL; we could do that up in the match above...
lock.suspended.pop();
} else {
// Sanity check: Our lock should not be in the suspension list
let found = lock.suspended.iter().find(|suspended_lock| suspended_lock.lft == lock_lft);
assert!(found.is_none());
}
if got_the_lock {
match lock.active {
ref mut active @ NoLock => {
*active = WriteLock(lock_lft);
}
_ => {
return err!(MemoryAcquireConflict { ptr, len, kind: AccessKind::Write, lock: lock.active.clone() })
}
}
}
}
Ok(())
}
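Taken together, releasing with a suspension region and recovering implement a counting scheme: the write lock only comes back once every region it was suspended for has ended. A self-contained model of that round trip, with u32s standing in for DynamicLifetime and CodeExtent (so this sketches the behaviour, not the real types):

#[derive(Debug, PartialEq)]
enum Lock { NoLock, WriteLock(u32) } // u32 stands in for DynamicLifetime

struct LockInfo { active: Lock, suspensions: Vec<u32> } // u32s stand in for CodeExtents

impl LockInfo {
    // Model of release_write_lock with `suspend: Some(region)`: the active
    // write lock is dropped and the region is recorded as a suspension.
    fn suspend(&mut self, region: u32) {
        self.active = Lock::NoLock;
        self.suspensions.push(region);
    }
    // Model of recover_write_lock: remove one matching suspension; only when
    // none are left is the write lock re-acquired.
    fn recover(&mut self, lft: u32, region: u32) {
        let idx = self.suspensions.iter().position(|&r| r == region)
            .expect("we have this lock suspended, but not for the given region");
        self.suspensions.remove(idx);
        if self.suspensions.is_empty() {
            self.active = Lock::WriteLock(lft);
        }
    }
}

fn main() {
    let mut lock = LockInfo { active: Lock::WriteLock(0), suspensions: vec![] };
    lock.suspend(1); // e.g. a shared reborrow for region 1
    lock.suspend(2); // and another one for region 2
    lock.recover(0, 1);
    assert_eq!(lock.active, Lock::NoLock); // region 2 is still suspending us
    lock.recover(0, 2);
    assert_eq!(lock.active, Lock::WriteLock(0)); // last suspension gone: lock is back
}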
@@ -549,28 +595,28 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
};
for alloc in self.alloc_map.values_mut() {
// Collect things for removal as we cannot remove while iterating
let mut remove_list : Vec<MemoryRange> = Vec::new();
for (range, lock) in alloc.locks.iter_mut() {
for lock in alloc.locks.iter_mut_all() {
// Delete everything that ends now -- i.e., keep only all the other lifetimes.
match *lock {
let lock_ended = match lock.active {
WriteLock(ref lft) => {
if has_ended(lft) {
remove_list.push(*range);
}
has_ended(lft)
}
ReadLock(ref mut lfts) => {
lfts.retain(|lft| !has_ended(lft));
if lfts.is_empty() {
remove_list.push(*range);
}
},
lfts.is_empty()
}
NoLock => false,
};
if lock_ended {
lock.active = NoLock;
}
// Also clean up suspended write locks
lock.suspended.retain(|suspended_lock| !has_ended(&suspended_lock.lft));
}
// Perform delayed removal
for range in remove_list {
alloc.locks.remove(&range);
}
// Clean up the map
alloc.locks.retain(|lock| {
match lock.active { NoLock => lock.suspended.len() > 0, _ => true }
});
}
}
}
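The cleanup pass boils down to the retain predicate above: an entry survives only while it still carries information. A small standalone illustration, again with integers standing in for lifetimes and regions:

enum Lock { NoLock, WriteLock(u32), ReadLock(Vec<u32>) } // u32s stand in for lifetimes

struct LockInfo { active: Lock, suspended: Vec<u32> } // u32s stand in for suspension regions

// Mirrors the retain predicate: keep an entry if it has an active lock or at
// least one pending suspension; drop it once it says nothing at all.
fn still_interesting(lock: &LockInfo) -> bool {
    match lock.active {
        Lock::NoLock => !lock.suspended.is_empty(),
        _ => true,
    }
}

fn main() {
    let mut locks = vec![
        LockInfo { active: Lock::NoLock, suspended: vec![] },            // fully released
        LockInfo { active: Lock::NoLock, suspended: vec![7] },           // write lock suspended for region 7
        LockInfo { active: Lock::ReadLock(vec![1]), suspended: vec![] }, // still read-locked
        LockInfo { active: Lock::WriteLock(2), suspended: vec![] },      // still write-locked
    ];
    locks.retain(still_interesting);
    assert_eq!(locks.len(), 3); // only the fully released entry was dropped
}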


@@ -14,6 +14,7 @@ mod validation;
mod machine;
mod memory;
mod operator;
mod range_map;
mod step;
mod terminator;
mod traits;
@@ -51,10 +52,14 @@ pub use self::memory::{
use self::memory::{
PointerArithmetic,
LockInfo,
Lock,
AccessKind,
};
use self::range_map::{
RangeMap
};
pub use self::value::{
PrimVal,
PrimValKind,


@@ -0,0 +1,149 @@
//! Implements a map from disjoint non-empty integer ranges to data associated with those ranges
use std::collections::{BTreeMap};
use std::ops;
#[derive(Clone, Debug)]
pub struct RangeMap<T> {
map: BTreeMap<Range, T>
}
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
// by the second field.
// This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all
// `Range`s whose `start` is <= the one we're looking for, but not > the end of the range we're checking.
// At the same time, the `end` is irrelevant for the sorting and range searching, but used for the check.
// This kind of search breaks if `end < start`, so don't do that!
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
struct Range {
start: u64,
end: u64, // Invariant: end > start
}
impl Range {
fn range(offset: u64, len: u64) -> ops::Range<Range> {
// We select all elements that are within
// the range given by the offset into the allocation and the length.
// This is sound if all ranges that intersect with the argument range are
// contained in the resulting range of ranges.
let left = Range { // lowest range to include `offset`
start: 0,
end: offset + 1,
};
let right = Range { // lowest (valid) range not to include `offset+len`
start: offset + len,
end: offset + len + 1,
};
left..right
}
fn overlaps(&self, offset: u64, len: u64) -> bool {
assert!(len > 0);
offset < self.end && offset+len >= self.start
}
}
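To see why these query bounds are sufficient, here is a self-contained rebuild of the same trick (duplicating the Range struct and the overlaps check so the snippet compiles on its own): the BTreeMap range query narrows the candidates, and overlaps performs the exact check.

use std::collections::BTreeMap;
use std::ops;

#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
struct Range { start: u64, end: u64 }

// Same bounds as `Range::range` above: everything from the lowest range that could
// still reach `offset` up to (excluding) the lowest range starting past the query.
fn query(offset: u64, len: u64) -> ops::Range<Range> {
    Range { start: 0, end: offset + 1 }..Range { start: offset + len, end: offset + len + 1 }
}

// Same check as `Range::overlaps` above.
fn overlaps(r: &Range, offset: u64, len: u64) -> bool {
    offset < r.end && offset + len >= r.start
}

fn main() {
    let mut map = BTreeMap::new();
    for r in [Range { start: 0, end: 2 }, Range { start: 2, end: 6 }, Range { start: 8, end: 10 }] {
        map.insert(r, ());
    }
    // Query bytes [3, 7): the BTreeMap range query already rules out [0,2) and [8,10),
    // and the overlap check confirms the remaining candidate.
    let hits: Vec<Range> = map.range(query(3, 4))
        .map(|(r, _)| *r)
        .filter(|r| overlaps(r, 3, 4))
        .collect();
    assert_eq!(hits, vec![Range { start: 2, end: 6 }]);
}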
impl<T> RangeMap<T> {
pub fn new() -> RangeMap<T> {
RangeMap { map: BTreeMap::new() }
}
fn iter_with_range<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=(&'a Range, &'a T)> + 'a {
self.map.range(Range::range(offset, len))
.filter_map(move |(range, data)| {
if range.overlaps(offset, len) {
Some((range, data))
} else {
None
}
})
}
pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=&'a T> + 'a {
self.iter_with_range(offset, len).map(|(_, data)| data)
}
fn split_entry_at(&mut self, offset: u64) where T: Clone {
let range = match self.iter_with_range(offset, 0).next() {
Some((&range, _)) => range,
None => return,
};
assert!(range.start <= offset && range.end > offset, "We got a range that doesn't even contain what we asked for.");
// There is an entry overlapping this position, see if we have to split it
if range.start < offset {
let data = self.map.remove(&range).unwrap();
let old = self.map.insert(Range { start: range.start, end: offset }, data.clone());
assert!(old.is_none());
let old = self.map.insert(Range { start: offset, end: range.end }, data);
assert!(old.is_none());
}
}
pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item=&'a mut T> + 'a {
self.map.values_mut()
}
/// Provide mutable iteration over everything in the given range. As a side-effect,
/// this will split entries in the map that are only partially hit by the given range,
/// to make sure that when they are mutated, the effect is constrained to the given range.
pub fn iter_mut_with_gaps<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=&'a mut T> + 'a
where T: Clone
{
// Preparation: Split first and last entry as needed.
self.split_entry_at(offset);
self.split_entry_at(offset+len);
// Now we can provide a mutable iterator
self.map.range_mut(Range::range(offset, len))
.filter_map(move |(&range, data)| {
if range.overlaps(offset, len) {
assert!(offset <= range.start && offset+len >= range.end, "The splitting went wrong");
Some(data)
} else {
// Skip this one
None
}
})
}
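A minimal illustration of the splitting side effect, using (start, end) tuples as keys instead of the Range type purely for brevity: an entry that is only partially covered by the query is cut in two, so later mutation stays inside the queried window.

use std::collections::BTreeMap;

fn main() {
    let mut map: BTreeMap<(u64, u64), char> = BTreeMap::new();
    map.insert((0, 10), 'a'); // one entry covering bytes [0, 10)

    // What split_entry_at(4) amounts to: replace the entry with two entries
    // carrying clones of the same data, split at offset 4.
    let data = map.remove(&(0, 10)).unwrap();
    map.insert((0, 4), data);
    map.insert((4, 10), data);

    // A mutable walk over [4, 10) now only ever sees the second entry.
    *map.get_mut(&(4, 10)).unwrap() = 'b';
    assert_eq!(map[&(0, 4)], 'a'); // bytes outside the window are untouched
    assert_eq!(map[&(4, 10)], 'b');
}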
/// Provide a mutable iterator over everything in the given range, with the same side-effects as
/// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default.
/// This is also how you insert.
pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=&'a mut T> + 'a
where T: Clone + Default
{
// Do a first iteration to collect the gaps
let mut gaps = Vec::new();
let mut last_end = None;
for (range, _) in self.iter_with_range(offset, len) {
if let Some(last_end) = last_end {
if last_end < range.start {
gaps.push(Range { start: last_end, end: range.start });
}
}
last_end = Some(range.end);
}
// Add default for all gaps
for gap in gaps {
let old = self.map.insert(gap, Default::default());
assert!(old.is_none());
}
// Now provide mutable iteration
self.iter_mut_with_gaps(offset, len)
}
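And a matching illustration of the gap-filling pass in iter_mut, again with tuple keys and i32 data for brevity: gaps between consecutive entries are filled with the default value before mutable iteration, which is what lets iter_mut double as insertion.

use std::collections::BTreeMap;

fn main() {
    // Existing entries cover [0, 2) and [5, 8); a caller asks to mutate [0, 8).
    let mut map: BTreeMap<(u64, u64), i32> = BTreeMap::new();
    map.insert((0, 2), 1);
    map.insert((5, 8), 2);

    // First pass: record the gaps between consecutive entries, like iter_mut does.
    let mut gaps = Vec::new();
    let mut last_end = None;
    for &(start, end) in map.keys() {
        if let Some(last_end) = last_end {
            if last_end < start {
                gaps.push((last_end, start));
            }
        }
        last_end = Some(end);
    }

    // Second pass: fill each gap with the default value so the caller sees a
    // contiguous run of entries it can mutate.
    for gap in gaps {
        map.insert(gap, i32::default());
    }

    let keys: Vec<_> = map.keys().cloned().collect();
    assert_eq!(keys, vec![(0, 2), (2, 5), (5, 8)]);
}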
pub fn retain<F>(&mut self, mut f: F)
where F: FnMut(&T) -> bool
{
let mut remove = Vec::new();
for (range, data) in self.map.iter() {
if !f(data) {
remove.push(*range);
}
}
for range in remove {
self.map.remove(&range);
}
}
}


@@ -10,7 +10,7 @@ use rustc::middle::region::CodeExtent;
use super::{
EvalError, EvalResult, EvalErrorKind,
EvalContext, DynamicLifetime,
AccessKind, LockInfo,
AccessKind,
Value,
Lvalue, LvalueExtra,
Machine,
@@ -23,7 +23,7 @@ enum ValidationMode {
Acquire,
/// Recover because the given region ended
Recover(CodeExtent),
Release
ReleaseUntil(Option<CodeExtent>),
}
impl ValidationMode {
@@ -31,7 +31,7 @@ impl ValidationMode {
use self::ValidationMode::*;
match self {
Acquire | Recover(_) => true,
Release => false,
ReleaseUntil(_) => false,
}
}
}
@@ -81,34 +81,20 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let lval = self.eval_lvalue(&operand.lval)?;
let query = ValidationQuery { lval, ty, re: operand.re, mutbl: operand.mutbl };
// Check the mode, and also perform mode-specific operations
let mode = match op {
ValidationOp::Acquire => ValidationMode::Acquire,
ValidationOp::Release => ValidationMode::Release,
ValidationOp::Suspend(_) => ValidationMode::Release,
};
match self.validate(query.clone(), mode) {
Err(EvalError { kind: EvalErrorKind::InvalidMemoryLockRelease { lock: LockInfo::ReadLock(_), .. }, .. }) => {
// HACK: When &x is used while x is already borrowed read-only, AddValidation still
// emits suspension. This code is legit, so just ignore the error *and*
// do NOT register a suspension.
// TODO: Integrate AddValidation better with borrowck so that we can/ not emit
// these wrong validation statements. This is all pretty fragile right now.
return Ok(());
}
res => res,
}?;
// Now that we are here, we know things went well. Time to register the suspension.
match op {
ValidationOp::Release => ValidationMode::ReleaseUntil(None),
ValidationOp::Suspend(ce) => {
if query.mutbl == MutMutable {
let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) };
trace!("Suspending {:?} until {:?}", query, ce);
self.suspended.entry(lft).or_insert_with(Vec::new).push(query);
self.suspended.entry(lft).or_insert_with(Vec::new).push(query.clone());
}
ValidationMode::ReleaseUntil(Some(ce))
}
_ => {}
};
Ok(())
self.validate(query, mode)
}
pub(crate) fn end_region(&mut self, ce: CodeExtent) -> EvalResult<'tcx> {
@@ -162,7 +148,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// TODO: Ideally we would know whether the destination is already initialized, and only
// release if it is.
res @ Err(EvalError{ kind: EvalErrorKind::ReadUndefBytes, ..}) => {
if let ValidationMode::Release = mode {
if !mode.acquiring() {
return Ok(());
}
res
@@ -182,8 +168,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
return Ok(());
}
// When we recover, we may see data whose validity *just* ended. Do not acquire it.
if let ValidationMode::Recover(ce) = mode {
if Some(ce) == query.re {
if let ValidationMode::Recover(ending_ce) = mode {
if query.re == Some(ending_ce) {
return Ok(());
}
}
@@ -249,10 +235,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
if len > 0 {
let ptr = ptr.to_ptr()?;
let access = match query.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
if mode.acquiring() {
self.memory.acquire_lock(ptr, len, query.re, access)?;
} else {
self.memory.release_write_lock(ptr, len)?;
match mode {
ValidationMode::Acquire => self.memory.acquire_lock(ptr, len, query.re, access)?,
ValidationMode::Recover(ending_ce) => self.memory.recover_write_lock(ptr, len, query.re, ending_ce)?,
ValidationMode::ReleaseUntil(suspended_ce) => self.memory.release_write_lock(ptr, len, query.re, suspended_ce)?,
}
}
}


@@ -0,0 +1,36 @@
// Make sure validation can handle many overlapping shared borrows for different parts of a data structure
#![allow(unused_variables)]
use std::cell::RefCell;
struct Test {
a: u32,
b: u32,
}
fn test1() {
let t = &mut Test { a: 0, b: 0 };
{
let x;
{
let y = &t.a;
x = &t;
let _y = *y;
}
let _x = x.a;
}
t.b = 42;
}
fn test2(r: &mut RefCell<i32>) {
let x = &*r; // releasing write lock, first suspension recorded
let mut x_ref = x.borrow_mut();
let x_inner : &mut i32 = &mut *x_ref;
let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension
let y = &*r; // second suspension for the outer write lock
let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock
}
fn main() {
test1();
test2(&mut RefCell::new(0));
}