Auto merge of #2315 - saethlin:shrink-item, r=saethlin
Optimizing Stacked Borrows (part 2): Shrink Item

This moves protectors out of `Item`, storing them both in a global `HashSet` which contains all currently-protected tags, and in a `Vec<SbTag>` on each `Frame`, so that when we return from a function we know which tags to remove from the protected set. This also bit-packs the 64-bit tag and the 2-bit permission together when they are stored in memory. This means we theoretically run out of tags sooner, but I doubt that limit will ever be hit.

Together these optimizations reduce Miri's memory footprint by ~66% when executing programs which stress Stacked Borrows. For example, running a test with isolation off which only panics currently peaks at ~19 GB; with this PR it peaks at ~6.2 GB.

To-do:

- [x] Enforce the 62-bit limit
- [x] Decide if there is a better order to pack the tag and permission in
- [x] Wait for `UnsafeCell` to become infectious, or express offsets + tags in the global protector set

Benchmarks before:

```
Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/backtraces/Cargo.toml
  Time (mean ± σ):      8.948 s ±  0.253 s    [User: 8.752 s, System: 0.158 s]
  Range (min … max):    8.619 s …  9.279 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/mse/Cargo.toml
  Time (mean ± σ):      2.129 s ±  0.037 s    [User: 1.849 s, System: 0.248 s]
  Range (min … max):    2.086 s …  2.176 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/serde1/Cargo.toml
  Time (mean ± σ):      3.334 s ±  0.017 s    [User: 3.211 s, System: 0.103 s]
  Range (min … max):    3.315 s …  3.352 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/serde2/Cargo.toml
  Time (mean ± σ):      3.316 s ±  0.038 s    [User: 3.207 s, System: 0.095 s]
  Range (min … max):    3.282 s …  3.375 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/unicode/Cargo.toml
  Time (mean ± σ):      6.391 s ±  0.323 s    [User: 5.928 s, System: 0.412 s]
  Range (min … max):    6.090 s …  6.917 s    5 runs
```

After:

```
Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/backtraces/Cargo.toml
  Time (mean ± σ):      6.955 s ±  0.051 s    [User: 6.807 s, System: 0.132 s]
  Range (min … max):    6.900 s …  7.038 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/mse/Cargo.toml
  Time (mean ± σ):      1.784 s ±  0.012 s    [User: 1.627 s, System: 0.156 s]
  Range (min … max):    1.772 s …  1.797 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/serde1/Cargo.toml
  Time (mean ± σ):      2.505 s ±  0.095 s    [User: 2.311 s, System: 0.096 s]
  Range (min … max):    2.405 s …  2.603 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/serde2/Cargo.toml
  Time (mean ± σ):      2.449 s ±  0.031 s    [User: 2.306 s, System: 0.100 s]
  Range (min … max):    2.395 s …  2.467 s    5 runs

Benchmark 1: cargo +miri miri run --manifest-path bench-cargo-miri/unicode/Cargo.toml
  Time (mean ± σ):      3.667 s ±  0.110 s    [User: 3.498 s, System: 0.140 s]
  Range (min … max):    3.564 s …  3.814 s    5 runs
```

The decrease in system time is probably due to spending less time in the page fault handler.
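As an illustration of the bit-packing, here is a minimal, self-contained sketch of the layout (illustrative only: `PackedItem`, `Perm`, and the `main` harness are invented for this example; the real implementation is the new `src/stacked_borrows/item.rs` in the diff below):

```rust
// A 61-bit tag, a 2-bit permission, and a 1-bit "is protected" flag share one
// u64, so an item shrinks to 8 bytes instead of carrying a separate tag,
// permission enum, and Option<CallId> protector.

const TAG_MASK: u64 = u64::MAX >> 3; // bits 0-60: the tag
const PERM_SHIFT: u64 = 61; // bits 61-62: the permission
const PROTECTED_SHIFT: u64 = 63; // bit 63: the protected flag

#[derive(Copy, Clone, Debug, PartialEq)]
enum Perm {
    Unique = 0,
    SharedReadWrite = 1,
}

#[derive(Copy, Clone)]
struct PackedItem(u64);

impl PackedItem {
    fn new(tag: u64, perm: Perm, protected: bool) -> Self {
        // Tags no longer fit once they exceed 2^61 - 1; in practice that
        // limit is never reached.
        assert!(tag <= TAG_MASK);
        PackedItem(tag | ((perm as u64) << PERM_SHIFT) | ((protected as u64) << PROTECTED_SHIFT))
    }

    fn tag(self) -> u64 {
        self.0 & TAG_MASK
    }

    fn protected(self) -> bool {
        self.0 >> PROTECTED_SHIFT != 0
    }
}

fn main() {
    let item = PackedItem::new(42, Perm::SharedReadWrite, true);
    assert_eq!(item.tag(), 42);
    assert!(item.protected());
    assert_eq!(std::mem::size_of::<PackedItem>(), 8);
}
```

Squeezing the protector down to a single bit is what makes this possible: the `CallId` that used to live in every `Item` is recovered, only on the cold error path, by searching the frames' protected-tag lists.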
Commit: db5a2b9747
@@ -90,8 +90,8 @@
 pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
 pub use crate::range_map::RangeMap;
 pub use crate::stacked_borrows::{
-    stack::Stack, CallId, EvalContextExt as StackedBorEvalContextExt, Item, Permission, SbTag,
-    SbTagExtra, Stacks,
+    CallId, EvalContextExt as StackedBorEvalContextExt, Item, Permission, SbTag, SbTagExtra, Stack,
+    Stacks,
 };
 pub use crate::sync::{CondvarId, EvalContextExt as SyncEvalContextExt, MutexId, RwLockId};
 pub use crate::thread::{
@@ -5,7 +5,6 @@
 use std::cell::RefCell;
 use std::collections::HashSet;
 use std::fmt;
-use std::num::NonZeroU64;
 use std::time::Instant;
 
 use rand::rngs::StdRng;
@@ -43,7 +42,7 @@
 /// Extra data stored with each stack frame
 pub struct FrameData<'tcx> {
     /// Extra data for Stacked Borrows.
-    pub call_id: stacked_borrows::CallId,
+    pub stacked_borrows: Option<stacked_borrows::FrameExtra>,
 
     /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
     /// called by `try`). When this frame is popped during unwinding a panic,
@@ -59,9 +58,9 @@ pub struct FrameData<'tcx> {
 impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Omitting `timing`, it does not support `Debug`.
-        let FrameData { call_id, catch_unwind, timing: _ } = self;
+        let FrameData { stacked_borrows, catch_unwind, timing: _ } = self;
         f.debug_struct("FrameData")
-            .field("call_id", call_id)
+            .field("stacked_borrows", stacked_borrows)
             .field("catch_unwind", catch_unwind)
             .finish()
     }
@@ -788,6 +787,7 @@ fn memory_read(
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
                 machine.current_span(),
+                &machine.threads,
             )?;
         }
         if let Some(weak_memory) = &alloc_extra.weak_memory {
@@ -819,6 +819,7 @@ fn memory_written(
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
                 machine.current_span(),
+                &machine.threads,
             )?;
         }
         if let Some(weak_memory) = &alloc_extra.weak_memory {
@@ -852,6 +853,7 @@ fn memory_deallocated(
                 tag,
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
+                &machine.threads,
             )
         } else {
             Ok(())
@@ -888,11 +890,12 @@ fn init_frame_extra(
         };
 
         let stacked_borrows = ecx.machine.stacked_borrows.as_ref();
-        let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
-            stacked_borrows.borrow_mut().new_call()
-        });
 
-        let extra = FrameData { call_id, catch_unwind: None, timing };
+        let extra = FrameData {
+            stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame()),
+            catch_unwind: None,
+            timing,
+        };
         Ok(frame.with_extra(extra))
     }
 
@@ -936,7 +939,7 @@ fn after_stack_pop(
     ) -> InterpResult<'tcx, StackPopJump> {
         let timing = frame.extra.timing.take();
         if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
-            stacked_borrows.borrow_mut().end_call(frame.extra.call_id);
+            stacked_borrows.borrow_mut().end_call(&frame.extra);
         }
         let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
         if let Some(profiler) = ecx.machine.profiler.as_ref() {
@@ -16,6 +16,7 @@
 };
 use rustc_span::DUMMY_SP;
 use rustc_target::abi::Size;
+use smallvec::SmallVec;
 use std::collections::HashSet;
 
 use crate::*;
@@ -23,8 +24,10 @@
 pub mod diagnostics;
 use diagnostics::{AllocHistory, TagHistory};
 
-pub mod stack;
-use stack::Stack;
+mod item;
+pub use item::{Item, Permission};
+mod stack;
+pub use stack::Stack;
 
 pub type CallId = NonZeroU64;
 
@@ -78,40 +81,20 @@ fn and_then<T>(self, f: impl FnOnce(SbTag) -> Option<T>) -> Option<T> {
     }
 }
 
-/// Indicates which permission is granted (by this item to some pointers)
-#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
-pub enum Permission {
-    /// Grants unique mutable access.
-    Unique,
-    /// Grants shared mutable access.
-    SharedReadWrite,
-    /// Grants shared read-only access.
-    SharedReadOnly,
-    /// Grants no access, but separates two groups of SharedReadWrite so they are not
-    /// all considered mutually compatible.
-    Disabled,
-}
+#[derive(Debug)]
+pub struct FrameExtra {
+    /// The ID of the call this frame corresponds to.
+    call_id: CallId,
 
-/// An item in the per-location borrow stack.
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct Item {
-    /// The permission this item grants.
-    perm: Permission,
-    /// The pointers the permission is granted to.
-    tag: SbTag,
-    /// An optional protector, ensuring the item cannot get popped until `CallId` is over.
-    protector: Option<CallId>,
-}
-
-impl fmt::Debug for Item {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "[{:?} for {:?}", self.perm, self.tag)?;
-        if let Some(call) = self.protector {
-            write!(f, " (call {})", call)?;
-        }
-        write!(f, "]")?;
-        Ok(())
-    }
+    /// If this frame is protecting any tags, they are listed here. We use this list to do
+    /// incremental updates of the global list of protected tags stored in the
+    /// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected
+    /// tag, to identify which call is responsible for protecting the tag.
+    /// See `Stack::item_popped` for more explanation.
+    ///
+    /// This will contain one tag per reference passed to the function, so
+    /// a size of 2 is enough for the vast majority of functions.
+    protected_tags: SmallVec<[SbTag; 2]>,
 }
 
 /// Extra per-allocation state.
@@ -136,8 +119,12 @@ pub struct GlobalStateInner {
     base_ptr_tags: FxHashMap<AllocId, SbTag>,
     /// Next unused call ID (for protectors).
     next_call_id: CallId,
-    /// Those call IDs corresponding to functions that are still running.
-    active_calls: FxHashSet<CallId>,
+    /// All currently protected tags.
+    /// An item is protected if its tag is in this set, *and* it has the "protected" bit set.
+    /// We add tags to this when they are created with a protector in `reborrow`, and
+    /// we remove tags from this when the call which is protecting them returns, in
+    /// `GlobalStateInner::end_call`. See `Stack::item_popped` for more details.
+    protected_tags: FxHashSet<SbTag>,
     /// The pointer ids to trace
     tracked_pointer_tags: HashSet<SbTag>,
     /// The call ids to trace
@@ -201,7 +188,7 @@ pub fn new(
             next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
             base_ptr_tags: FxHashMap::default(),
             next_call_id: NonZeroU64::new(1).unwrap(),
-            active_calls: FxHashSet::default(),
+            protected_tags: FxHashSet::default(),
             tracked_pointer_tags,
             tracked_call_ids,
             retag_fields,
@@ -215,23 +202,25 @@ fn new_ptr(&mut self) -> SbTag {
         id
     }
 
-    pub fn new_call(&mut self) -> CallId {
-        let id = self.next_call_id;
-        trace!("new_call: Assigning ID {}", id);
-        if self.tracked_call_ids.contains(&id) {
-            register_diagnostic(NonHaltingDiagnostic::CreatedCallId(id));
+    pub fn new_frame(&mut self) -> FrameExtra {
+        let call_id = self.next_call_id;
+        trace!("new_frame: Assigning call ID {}", call_id);
+        if self.tracked_call_ids.contains(&call_id) {
+            register_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id));
         }
-        assert!(self.active_calls.insert(id));
-        self.next_call_id = NonZeroU64::new(id.get() + 1).unwrap();
-        id
+        self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap();
+        FrameExtra { call_id, protected_tags: SmallVec::new() }
     }
 
-    pub fn end_call(&mut self, id: CallId) {
-        assert!(self.active_calls.remove(&id));
-    }
-
-    fn is_active(&self, id: CallId) -> bool {
-        self.active_calls.contains(&id)
+    pub fn end_call(&mut self, frame: &machine::FrameData<'_>) {
+        for tag in &frame
+            .stacked_borrows
+            .as_ref()
+            .expect("we should have Stacked Borrows data")
+            .protected_tags
+        {
+            self.protected_tags.remove(tag);
+        }
     }
 
     pub fn base_ptr_tag(&mut self, id: AllocId) -> SbTag {
@@ -287,7 +276,7 @@ impl<'tcx> Stack {
     /// Find the first write-incompatible item above the given one --
     /// i.e, find the height to which the stack will be truncated when writing to `granting`.
     fn find_first_write_incompatible(&self, granting: usize) -> usize {
-        let perm = self.get(granting).unwrap().perm;
+        let perm = self.get(granting).unwrap().perm();
         match perm {
             Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
             Permission::Disabled => bug!("Cannot use Disabled for anything"),
@@ -299,7 +288,7 @@ fn find_first_write_incompatible(&self, granting: usize) -> usize {
                 // The SharedReadWrite *just* above us are compatible, to skip those.
                 let mut idx = granting + 1;
                 while let Some(item) = self.get(idx) {
-                    if item.perm == Permission::SharedReadWrite {
+                    if item.perm() == Permission::SharedReadWrite {
                         // Go on.
                         idx += 1;
                     } else {
@@ -325,32 +314,67 @@ fn item_popped(
         provoking_access: Option<(SbTagExtra, AllocRange, Size, AccessKind)>, // just for debug printing and error messages
         global: &GlobalStateInner,
         alloc_history: &mut AllocHistory,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
-        if global.tracked_pointer_tags.contains(&item.tag) {
+        if global.tracked_pointer_tags.contains(&item.tag()) {
             register_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(
                 *item,
                 provoking_access.map(|(tag, _alloc_range, _size, access)| (tag, access)),
             ));
         }
 
-        if let Some(call) = item.protector {
-            if global.is_active(call) {
-                if let Some((tag, _alloc_range, _offset, _access)) = provoking_access {
-                    Err(err_sb_ub(
-                        format!(
-                            "not granting access to tag {:?} because incompatible item is protected: {:?}",
-                            tag, item
-                        ),
-                        None,
-                        tag.and_then(|tag| alloc_history.get_logs_relevant_to(tag, Some(item.tag))),
-                    ))?
-                } else {
-                    Err(err_sb_ub(
-                        format!("deallocating while item is protected: {:?}", item),
-                        None,
-                        None,
-                    ))?
-                }
-            }
-        }
+        if !item.protected() {
+            return Ok(());
+        }
+
+        // We store tags twice, once in global.protected_tags and once in each call frame.
+        // We do this because consulting a single global set in this function is faster
+        // than attempting to search all call frames in the program for the `FrameExtra`
+        // (if any) which is protecting the popped tag.
+        //
+        // This duplication trades off making `end_call` slower to make this function faster. This
+        // trade-off is profitable in practice for a combination of two reasons.
+        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
+        //    Therefore, adding overhead to function call/return is profitable even if it only
+        //    saves a little work in this function.
+        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
+        //    which ends up about linear in the number of protected tags in the program into a
+        //    constant time check (and a slow linear, because the tags in the frames aren't contiguous).
+        if global.protected_tags.contains(&item.tag()) {
+            // This path is cold because it is fatal to the program. So here it is fine to do the
+            // more expensive search to figure out which call is responsible for protecting this
+            // tag.
+            let call_id = threads
+                .all_stacks()
+                .flatten()
+                .map(|frame| {
+                    frame
+                        .extra
+                        .stacked_borrows
+                        .as_ref()
+                        .expect("we should have Stacked Borrows data")
+                })
+                .find(|frame| frame.protected_tags.contains(&item.tag()))
+                .map(|frame| frame.call_id)
+                .unwrap(); // FIXME: Surely we should find something, but a panic seems wrong here?
+            if let Some((tag, _alloc_range, _offset, _access)) = provoking_access {
+                Err(err_sb_ub(
+                    format!(
+                        "not granting access to tag {:?} because incompatible item {:?} is protected by call {:?}",
+                        tag, item, call_id
+                    ),
+                    None,
+                    tag.and_then(|tag| alloc_history.get_logs_relevant_to(tag, Some(item.tag()))),
+                ))?
+            } else {
+                Err(err_sb_ub(
+                    format!(
+                        "deallocating while item {:?} is protected by call {:?}",
+                        item, call_id
+                    ),
+                    None,
+                    None,
+                ))?
+            }
+        }
        Ok(())
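To summarize the scheme the comment in this hunk describes, here is a minimal stand-alone sketch of the two-sided bookkeeping (hypothetical `Tag`, `Frame`, and `Global` types, not Miri's actual API): the global set makes the hot protection check a single hash lookup, while the per-frame list keeps the work done at function return proportional to the number of tags that frame protects.

```rust
use std::collections::HashSet;

type Tag = u64; // stand-in for SbTag

#[derive(Default)]
struct Frame {
    protected_tags: Vec<Tag>, // Miri uses SmallVec<[SbTag; 2]> in FrameExtra
}

#[derive(Default)]
struct Global {
    protected_tags: HashSet<Tag>, // all currently-protected tags
}

impl Global {
    /// On a protecting reborrow: record the tag in both places.
    fn protect(&mut self, frame: &mut Frame, tag: Tag) {
        frame.protected_tags.push(tag);
        self.protected_tags.insert(tag);
    }

    /// Hot path, consulted whenever a protected item would be popped:
    /// one hash lookup instead of a walk over every frame in the program.
    fn is_protected(&self, tag: Tag) -> bool {
        self.protected_tags.contains(&tag)
    }

    /// On function return: remove only this frame's tags from the global set.
    fn end_call(&mut self, frame: &Frame) {
        for tag in &frame.protected_tags {
            self.protected_tags.remove(tag);
        }
    }
}

fn main() {
    let (mut global, mut frame) = (Global::default(), Frame::default());
    global.protect(&mut frame, 7);
    assert!(global.is_protected(7));
    global.end_call(&frame);
    assert!(!global.is_protected(7));
}
```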
@@ -369,6 +393,7 @@ fn access(
         current_span: &mut CurrentSpan<'_, '_, 'tcx>,
         alloc_history: &mut AllocHistory,
         exposed_tags: &FxHashSet<SbTag>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         // Two main steps: Find granting item, remove incompatible items above.
 
@@ -399,8 +424,9 @@ fn access(
                     Some((tag, alloc_range, offset, access)),
                     global,
                     alloc_history,
+                    threads,
                 )?;
-                alloc_history.log_invalidation(item.tag, alloc_range, current_span);
+                alloc_history.log_invalidation(item.tag(), alloc_range, current_span);
                 Ok(())
             })?;
         } else {
@@ -425,8 +451,9 @@ fn access(
                     Some((tag, alloc_range, offset, access)),
                     global,
                     alloc_history,
+                    threads,
                 )?;
-                alloc_history.log_invalidation(item.tag, alloc_range, current_span);
+                alloc_history.log_invalidation(item.tag(), alloc_range, current_span);
                 Ok(())
             })?;
         }
@@ -439,9 +466,9 @@ fn access(
             for i in 0..self.len() {
                 let item = self.get(i).unwrap();
                 // Skip disabled items, they cannot be matched anyway.
-                if !matches!(item.perm, Permission::Disabled) {
+                if !matches!(item.perm(), Permission::Disabled) {
                     // We are looking for a strict upper bound, so add 1 to this tag.
-                    max = cmp::max(item.tag.0.checked_add(1).unwrap(), max);
+                    max = cmp::max(item.tag().0.checked_add(1).unwrap(), max);
                 }
             }
             if let Some(unk) = self.unknown_bottom() {
@@ -467,6 +494,7 @@ fn dealloc(
         global: &GlobalStateInner,
         alloc_history: &mut AllocHistory,
         exposed_tags: &FxHashSet<SbTag>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         // Step 1: Make sure there is a granting item.
         self.find_granting(AccessKind::Write, tag, exposed_tags).map_err(|_| {
@@ -482,7 +510,7 @@ fn dealloc(
         // Step 2: Consider all items removed. This checks for protectors.
         for idx in (0..self.len()).rev() {
             let item = self.get(idx).unwrap();
-            Stack::item_popped(&item, None, global, alloc_history)?;
+            Stack::item_popped(&item, None, global, alloc_history, threads)?;
         }
         Ok(())
     }
@@ -502,10 +530,11 @@ fn grant(
         current_span: &mut CurrentSpan<'_, '_, 'tcx>,
         alloc_history: &mut AllocHistory,
         exposed_tags: &FxHashSet<SbTag>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         // Figure out which access `perm` corresponds to.
         let access =
-            if new.perm.grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
+            if new.perm().grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
 
         // Now we figure out which item grants our parent (`derived_from`) this kind of access.
         // We use that to determine where to put the new item.
@@ -517,7 +546,7 @@ fn grant(
         // Compute where to put the new item.
         // Either way, we ensure that we insert the new item in a way such that between
         // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
-        let new_idx = if new.perm == Permission::SharedReadWrite {
+        let new_idx = if new.perm() == Permission::SharedReadWrite {
             assert!(
                 access == AccessKind::Write,
                 "this case only makes sense for stack-like accesses"
@@ -550,6 +579,7 @@ fn grant(
                 current_span,
                 alloc_history,
                 exposed_tags,
+                threads,
             )?;
 
             // We insert "as far up as possible": We know only compatible items are remaining
@@ -571,7 +601,7 @@ fn grant(
 impl<'tcx> Stacks {
     /// Creates new stack with initial tag.
     fn new(size: Size, perm: Permission, tag: SbTag) -> Self {
-        let item = Item { perm, tag, protector: None };
+        let item = Item::new(tag, perm, false);
         let stack = Stack::new(item);
 
         Stacks {
@@ -637,6 +667,7 @@ pub fn memory_read<'tcx>(
         range: AllocRange,
         state: &GlobalState,
         mut current_span: CurrentSpan<'_, '_, 'tcx>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!(
             "read access with tag {:?}: {:?}, size {}",
@@ -654,6 +685,7 @@ pub fn memory_read<'tcx>(
                 &mut current_span,
                 history,
                 exposed_tags,
+                threads,
             )
         })
     }
@@ -666,6 +698,7 @@ pub fn memory_written<'tcx>(
         range: AllocRange,
         state: &GlobalState,
         mut current_span: CurrentSpan<'_, '_, 'tcx>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!(
             "write access with tag {:?}: {:?}, size {}",
@@ -683,6 +716,7 @@ pub fn memory_written<'tcx>(
                 &mut current_span,
                 history,
                 exposed_tags,
+                threads,
             )
         })
     }
@@ -694,11 +728,12 @@ pub fn memory_deallocated<'tcx>(
         tag: SbTagExtra,
         range: AllocRange,
         state: &GlobalState,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
         let state = state.borrow();
         self.for_each(range, |offset, stack, history, exposed_tags| {
-            stack.dealloc(tag, (alloc_id, range, offset), &state, history, exposed_tags)
+            stack.dealloc(tag, (alloc_id, range, offset), &state, history, exposed_tags, threads)
         })?;
         Ok(())
     }
@@ -801,7 +836,6 @@ fn reborrow(
             });
         }
 
-        let protector = if protect { Some(this.frame().extra.call_id) } else { None };
         trace!(
             "reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
             kind,
@@ -812,6 +846,13 @@ fn reborrow(
             size.bytes()
         );
 
+        if protect {
+            this.frame_mut().extra.stacked_borrows.as_mut().unwrap().protected_tags.push(new_tag);
+            this.machine.stacked_borrows.as_mut().unwrap().get_mut().protected_tags.insert(new_tag);
+        }
+        // FIXME: can't hold the current span handle across the borrows of self above
+        let current_span = &mut this.machine.current_span();
+
         // Update the stacks.
         // Make sure that raw pointers and mutable shared references are reborrowed "weak":
         // There could be existing unique pointers reborrowed from them that should remain valid!
@@ -848,15 +889,16 @@ fn reborrow(
                 } else {
                     Permission::SharedReadWrite
                 };
-                let protector = if frozen {
-                    protector
+                let protected = if frozen {
+                    protect
                 } else {
                     // We do not protect inside UnsafeCell.
                     // This fixes https://github.com/rust-lang/rust/issues/55005.
-                    None
+                    false
                 };
-                let item = Item { perm, tag: new_tag, protector };
+                let item = Item::new(new_tag, perm, protected);
                 let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
+                let threads = &this.machine.threads;
                 stacked_borrows.for_each(range, |offset, stack, history, exposed_tags| {
                     stack.grant(
                         orig_tag,
@@ -866,6 +908,7 @@ fn reborrow(
                         current_span,
                         history,
                         exposed_tags,
+                        threads,
                     )
                 })
             })?;
@@ -881,9 +924,10 @@ fn reborrow(
                 .as_mut()
                 .expect("we should have Stacked Borrows data")
                 .borrow_mut();
-            let item = Item { perm, tag: new_tag, protector };
+            let item = Item::new(new_tag, perm, protect);
             let range = alloc_range(base_offset, size);
             let mut global = machine.stacked_borrows.as_ref().unwrap().borrow_mut();
+            let threads = &machine.threads;
             let current_span = &mut machine.current_span(); // `get_alloc_extra_mut` invalidated our old `current_span`
             stacked_borrows.for_each(range, |offset, stack, history, exposed_tags| {
                 stack.grant(
@@ -894,6 +938,7 @@ fn reborrow(
                     current_span,
                     history,
                     exposed_tags,
+                    threads,
                 )
             })?;
 
@@ -141,7 +141,7 @@ pub fn grant_error<'tcx>(
 ) -> InterpError<'tcx> {
     let action = format!(
         "trying to reborrow {derived_from:?} for {new_perm:?} permission at {alloc_id:?}[{offset:#x}]",
-        new_perm = new.perm,
+        new_perm = new.perm(),
         offset = error_offset.bytes(),
     );
     err_sb_ub(
@@ -185,7 +185,7 @@ fn error_cause(stack: &Stack, tag: SbTagExtra) -> &'static str {
     if let SbTagExtra::Concrete(tag) = tag {
         if (0..stack.len())
            .map(|i| stack.get(i).unwrap())
-            .any(|item| item.tag == tag && item.perm != Permission::Disabled)
+            .any(|item| item.tag() == tag && item.perm() != Permission::Disabled)
         {
             ", but that tag only grants SharedReadOnly permission for this location"
         } else {
src/stacked_borrows/item.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
+use crate::stacked_borrows::SbTag;
+use std::fmt;
+use std::num::NonZeroU64;
+
+/// An item in the per-location borrow stack.
+#[derive(Copy, Clone, Hash, PartialEq, Eq)]
+pub struct Item(u64);
+
+// An Item contains 3 bitfields:
+// * Bits 0-60 store an SbTag
+// * Bits 61-62 store a Permission
+// * Bit 63 stores a flag which indicates if we have a protector
+const TAG_MASK: u64 = u64::MAX >> 3;
+const PERM_MASK: u64 = 0x3 << 61;
+const PROTECTED_MASK: u64 = 0x1 << 63;
+
+const PERM_SHIFT: u64 = 61;
+const PROTECTED_SHIFT: u64 = 63;
+
+impl Item {
+    pub fn new(tag: SbTag, perm: Permission, protected: bool) -> Self {
+        assert!(tag.0.get() <= TAG_MASK);
+        let packed_tag = tag.0.get();
+        let packed_perm = perm.to_bits() << PERM_SHIFT;
+        let packed_protected = (protected as u64) << PROTECTED_SHIFT;
+
+        let new = Self(packed_tag | packed_perm | packed_protected);
+
+        debug_assert!(new.tag() == tag);
+        debug_assert!(new.perm() == perm);
+        debug_assert!(new.protected() == protected);
+
+        new
+    }
+
+    /// The pointers the permission is granted to.
+    pub fn tag(self) -> SbTag {
+        SbTag(NonZeroU64::new(self.0 & TAG_MASK).unwrap())
+    }
+
+    /// The permission this item grants.
+    pub fn perm(self) -> Permission {
+        Permission::from_bits((self.0 & PERM_MASK) >> PERM_SHIFT)
+    }
+
+    /// Whether or not there is a protector for this tag
+    pub fn protected(self) -> bool {
+        self.0 & PROTECTED_MASK > 0
+    }
+
+    /// Set the Permission stored in this Item
+    pub fn set_permission(&mut self, perm: Permission) {
+        // Clear the current permission
+        self.0 &= !PERM_MASK;
+        // Write the new permission into the Permission bits
+        self.0 |= perm.to_bits() << PERM_SHIFT;
+    }
+}
+
+impl fmt::Debug for Item {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "[{:?} for {:?}]", self.perm(), self.tag())
+    }
+}
+
+/// Indicates which permission is granted (by this item to some pointers)
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Permission {
+    /// Grants unique mutable access.
+    Unique,
+    /// Grants shared mutable access.
+    SharedReadWrite,
+    /// Grants shared read-only access.
+    SharedReadOnly,
+    /// Grants no access, but separates two groups of SharedReadWrite so they are not
+    /// all considered mutually compatible.
+    Disabled,
+}
+
+impl Permission {
+    const UNIQUE: u64 = 0;
+    const SHARED_READ_WRITE: u64 = 1;
+    const SHARED_READ_ONLY: u64 = 2;
+    const DISABLED: u64 = 3;
+
+    fn to_bits(self) -> u64 {
+        match self {
+            Permission::Unique => Self::UNIQUE,
+            Permission::SharedReadWrite => Self::SHARED_READ_WRITE,
+            Permission::SharedReadOnly => Self::SHARED_READ_ONLY,
+            Permission::Disabled => Self::DISABLED,
+        }
+    }
+
+    fn from_bits(perm: u64) -> Self {
+        match perm {
+            Self::UNIQUE => Permission::Unique,
+            Self::SHARED_READ_WRITE => Permission::SharedReadWrite,
+            Self::SHARED_READ_ONLY => Permission::SharedReadOnly,
+            Self::DISABLED => Permission::Disabled,
+            _ => unreachable!(),
+        }
+    }
+}
@@ -37,7 +37,7 @@ pub struct Stack {
 }
 
 /// A very small cache of searches of the borrow stack
-/// This maps tags to locations in the borrow stack. Any use of this still needs to do a
+/// This maps items to locations in the borrow stack. Any use of this still needs to do a
 /// probably-cold random access into the borrow stack to figure out what `Permission` an
 /// `SbTag` grants. We could avoid this by also storing the `Permission` in the cache, but
 /// most lookups into the cache are immediately followed by access of the full borrow stack anyway.
@@ -48,7 +48,7 @@ pub struct Stack {
 #[cfg(feature = "stack-cache")]
 #[derive(Clone, Debug)]
 struct StackCache {
-    tags: [SbTag; CACHE_LEN], // Hot in find_granting
+    items: [Item; CACHE_LEN], // Hot in find_granting
     idx: [usize; CACHE_LEN], // Hot in grant
 }
 
@@ -59,11 +59,11 @@ impl StackCache {
     /// We use the position in the cache to represent how recently a tag was used; the first position
     /// is the most recently used tag. So an add shifts every element towards the end, and inserts
     /// the new element at the start. We lose the last element.
-    /// This strategy is effective at keeping the most-accessed tags in the cache, but it costs a
+    /// This strategy is effective at keeping the most-accessed items in the cache, but it costs a
     /// linear shift across the entire cache when we add a new tag.
-    fn add(&mut self, idx: usize, tag: SbTag) {
-        self.tags.copy_within(0..CACHE_LEN - 1, 1);
-        self.tags[0] = tag;
+    fn add(&mut self, idx: usize, item: Item) {
+        self.items.copy_within(0..CACHE_LEN - 1, 1);
+        self.items[0] = item;
         self.idx.copy_within(0..CACHE_LEN - 1, 1);
         self.idx[0] = idx;
     }
@@ -80,20 +80,20 @@ impl Eq for Stack {}
 
 impl<'tcx> Stack {
     /// Panics if any of the caching mechanisms have broken,
-    /// - The StackCache indices don't refer to the parallel tags,
-    /// - There are no Unique tags outside of first_unique..last_unique
+    /// - The StackCache indices don't refer to the parallel items,
+    /// - There are no Unique items outside of first_unique..last_unique
     #[cfg(feature = "expensive-debug-assertions")]
     fn verify_cache_consistency(&self) {
         // Only a full cache needs to be valid. Also see the comments in find_granting_cache
         // and set_unknown_bottom.
         if self.borrows.len() >= CACHE_LEN {
-            for (tag, stack_idx) in self.cache.tags.iter().zip(self.cache.idx.iter()) {
-                assert_eq!(self.borrows[*stack_idx].tag, *tag);
+            for (tag, stack_idx) in self.cache.items.iter().zip(self.cache.idx.iter()) {
+                assert_eq!(self.borrows[*stack_idx], *tag);
             }
         }
 
         for (idx, item) in self.borrows.iter().enumerate() {
-            if item.perm == Permission::Unique {
+            if item.perm() == Permission::Unique {
                 assert!(
                     self.unique_range.contains(&idx),
                     "{:?} {:?}",
@@ -128,7 +128,7 @@ pub(super) fn find_granting(
             .rev() // search top-to-bottom
             .find_map(|(idx, item)| {
                 // If the item fits and *might* be this wildcard, use it.
-                if item.perm.grants(access) && exposed_tags.contains(&item.tag) {
+                if item.perm().grants(access) && exposed_tags.contains(&item.tag()) {
                     Some(idx)
                 } else {
                     None
@@ -161,9 +161,9 @@ fn find_granting_tagged(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
         // If we didn't find the tag in the cache, fall back to a linear search of the
         // whole stack, and add the tag to the cache.
         for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
-            if tag == item.tag && item.perm.grants(access) {
+            if tag == item.tag() && item.perm().grants(access) {
                 #[cfg(feature = "stack-cache")]
-                self.cache.add(stack_idx, tag);
+                self.cache.add(stack_idx, *item);
                 return Some(stack_idx);
             }
         }
@@ -175,7 +175,7 @@ fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
         // This looks like a common-sense optimization; we're going to do a linear search of the
         // cache or the borrow stack to scan the shorter of the two. This optimization is miniscule
         // and this check actually ensures we do not access an invalid cache.
-        // When a stack is created and when tags are removed from the top of the borrow stack, we
+        // When a stack is created and when items are removed from the top of the borrow stack, we
         // need some valid value to populate the cache. In both cases, we try to use the bottom
         // item. But when the stack is cleared in `set_unknown_bottom` there is nothing we could
         // place in the cache that is correct. But due to the way we populate the cache in
@@ -185,21 +185,23 @@ fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
             return None;
         }
         // Search the cache for the tag we're looking up
-        let cache_idx = self.cache.tags.iter().position(|t| *t == tag)?;
+        let cache_idx = self.cache.items.iter().position(|t| t.tag() == tag)?;
         let stack_idx = self.cache.idx[cache_idx];
         // If we found the tag, look up its position in the stack to see if it grants
         // the required permission
-        if self.borrows[stack_idx].perm.grants(access) {
-            // If it does, and it's not already in the most-recently-used position, re-insert it at
-            // the most-recently-used position. This technically reduces the efficiency of the
-            // cache by duplicating elements, but current benchmarks do not seem to benefit from
-            // avoiding this duplication.
-            // But if the tag is in position 1, avoiding the duplicating add is trivial.
+        if self.cache.items[cache_idx].perm().grants(access) {
+            // If it does, and it's not already in the most-recently-used position, move it there.
+            // Except if the tag is in position 1, this is equivalent to just a swap, so do that.
             if cache_idx == 1 {
-                self.cache.tags.swap(0, 1);
+                self.cache.items.swap(0, 1);
                 self.cache.idx.swap(0, 1);
             } else if cache_idx > 1 {
-                self.cache.add(stack_idx, tag);
+                self.cache.add(stack_idx, self.cache.items[cache_idx]);
             }
             Some(stack_idx)
         } else {
@@ -224,7 +226,7 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
         if self.unique_range.end >= new_idx {
             self.unique_range.end += 1;
         }
-        if new.perm == Permission::Unique {
+        if new.perm() == Permission::Unique {
             // Make sure the possibly-unique range contains the new borrow
             self.unique_range.start = self.unique_range.start.min(new_idx);
             self.unique_range.end = self.unique_range.end.max(new_idx + 1);
@@ -233,7 +235,7 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
         // The above insert changes the meaning of every index in the cache >= new_idx, so now
         // we need to find every one of those indexes and increment it.
         // But if the insert is at the end (equivalent to a push), we can skip this step because
-        // it didn't change the position of any other tags.
+        // it didn't change the position of any other items.
         if new_idx != self.borrows.len() - 1 {
             for idx in &mut self.cache.idx {
                 if *idx >= new_idx {
@@ -243,7 +245,7 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
         }
 
         // This primes the cache for the next access, which is almost always the just-added tag.
-        self.cache.add(new_idx, new.tag);
+        self.cache.add(new_idx, new);
 
         #[cfg(feature = "expensive-debug-assertions")]
         self.verify_cache_consistency();
@@ -255,9 +257,9 @@ pub fn new(item: Item) -> Self {
             borrows: vec![item],
             unknown_bottom: None,
             #[cfg(feature = "stack-cache")]
-            cache: StackCache { idx: [0; CACHE_LEN], tags: [item.tag; CACHE_LEN] },
+            cache: StackCache { idx: [0; CACHE_LEN], items: [item; CACHE_LEN] },
             #[cfg(feature = "stack-cache")]
-            unique_range: if item.perm == Permission::Unique { 0..1 } else { 0..0 },
+            unique_range: if item.perm() == Permission::Unique { 0..1 } else { 0..0 },
         }
     }
 
@@ -298,10 +300,16 @@ pub fn disable_uniques_starting_at<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
             let lower = unique_range.start.max(disable_start);
             let upper = (unique_range.end + 1).min(self.borrows.len());
             for item in &mut self.borrows[lower..upper] {
-                if item.perm == Permission::Unique {
+                if item.perm() == Permission::Unique {
                     log::trace!("access: disabling item {:?}", item);
                     visitor(*item)?;
-                    item.perm = Permission::Disabled;
+                    item.set_permission(Permission::Disabled);
+                    // Also update all copies of this item in the cache.
+                    for it in &mut self.cache.items {
+                        if it.tag() == item.tag() {
+                            it.set_permission(Permission::Disabled);
+                        }
+                    }
                 }
             }
         }
@@ -341,7 +349,7 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
         // also possible that the whole cache is still valid. So we call this method to repair what
         // aspects of the cache are now invalid, instead of resetting the whole thing to a trivially
         // valid default state.
-        let base_tag = self.borrows[0].tag;
+        let base_tag = self.borrows[0];
         let mut removed = 0;
         let mut cursor = 0;
         // Remove invalid entries from the cache by rotating them to the end of the cache, then
@@ -350,7 +358,7 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
         for _ in 0..CACHE_LEN - 1 {
             if self.cache.idx[cursor] >= start {
                 self.cache.idx[cursor..CACHE_LEN - removed].rotate_left(1);
-                self.cache.tags[cursor..CACHE_LEN - removed].rotate_left(1);
+                self.cache.items[cursor..CACHE_LEN - removed].rotate_left(1);
                 removed += 1;
             } else {
                 cursor += 1;
@@ -358,7 +366,7 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
             }
         }
         for i in CACHE_LEN - removed - 1..CACHE_LEN {
             self.cache.idx[i] = 0;
-            self.cache.tags[i] = base_tag;
+            self.cache.items[i] = base_tag;
         }
 
         if start < self.unique_range.start.saturating_sub(1) {
@@ -281,6 +281,10 @@ fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
         &mut self.threads[self.active_thread].stack
     }
 
+    pub fn all_stacks(&self) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>]> {
+        self.threads.iter().map(|t| &t.stack[..])
+    }
+
     /// Create a new thread and returns its id.
     fn create_thread(&mut self) -> ThreadId {
         let new_thread_id = ThreadId::new(self.threads.len());
@@ -97,7 +97,7 @@ macro_rules! regexes {
     // erase specific alignments
     "alignment [0-9]+" => "alignment ALIGN",
     // erase thread caller ids
-    r"\(call [0-9]+\)" => "(call ID)",
+    r"call [0-9]+" => "call ID",
     // erase platform module paths
     "sys::[a-z]+::" => "sys::PLATFORM::",
     // Windows file paths
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
   --> $DIR/aliasing_mut1.rs:LL:CC
    |
 LL | pub fn safe(_x: &mut i32, _y: &mut i32) {}
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [SharedReadOnly for <TAG>] is protected by call ID
   --> $DIR/aliasing_mut2.rs:LL:CC
    |
 LL | pub fn safe(_x: &i32, _y: &mut i32) {}
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item [SharedReadOnly for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [SharedReadOnly for <TAG>] is protected by call ID
   --> $DIR/aliasing_mut4.rs:LL:CC
    |
 LL | pub fn safe(_x: &i32, _y: &mut Cell<i32>) {}
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item [SharedReadOnly for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,4 +1,4 @@
-//@error-pattern: deallocating while item is protected
+//@error-pattern: deallocating while item
 
 fn inner(x: &mut i32, f: fn(&mut i32)) {
     // `f` may mutate, but it may not deallocate!
@@ -1,8 +1,8 @@
-error: Undefined Behavior: deallocating while item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: deallocating while item [Unique for <TAG>] is protected by call ID
   --> RUSTLIB/alloc/src/alloc.rs:LL:CC
    |
 LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item [Unique for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,4 +1,4 @@
-//@error-pattern: deallocating while item is protected
+//@error-pattern: deallocating while item
 use std::marker::PhantomPinned;
 
 pub struct NotUnpin(i32, PhantomPinned);
@@ -1,8 +1,8 @@
-error: Undefined Behavior: deallocating while item is protected: [SharedReadWrite for <TAG> (call ID)]
+error: Undefined Behavior: deallocating while item [SharedReadWrite for <TAG>] is protected by call ID
   --> RUSTLIB/alloc/src/alloc.rs:LL:CC
    |
 LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [SharedReadWrite for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item [SharedReadWrite for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
   --> $DIR/illegal_write6.rs:LL:CC
    |
 LL | unsafe { *y = 2 };
-   | ^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^ not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
   --> $DIR/invalidate_against_barrier1.rs:LL:CC
    |
 LL | let _val = unsafe { *x };
-   | ^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^ not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [SharedReadOnly for <TAG>] is protected by call ID
   --> $DIR/invalidate_against_barrier2.rs:LL:CC
    |
 LL | unsafe { *x = 0 };
-   | ^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+   | ^^^^^^ not granting access to tag <TAG> because incompatible item [SharedReadOnly for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,5 +1,5 @@
 //@compile-flags: -Zmiri-retag-fields
-//@error-pattern: incompatible item is protected
+//@error-pattern: is protected by call
 struct Newtype<'a>(&'a mut i32);
 
 fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
   --> RUSTLIB/alloc/src/boxed.rs:LL:CC
    |
 LL | Box(unsafe { Unique::new_unchecked(raw) }, alloc)
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item [Unique for <TAG>] is protected by call ID
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information