Store protectors outside Item, pack Tag and Perm
Previously, Item was a struct containing a NonZeroU64, an Option that was usually unset or irrelevant, and a 4-variant enum. Collectively an Item occupied 24 bytes, even though only 8 of them carried useful data most of the time. This change takes advantage of the fact that the total space of SbTags is effectively impossible to exhaust, and steals 3 bits from the tag to pack the whole struct into a single u64. The bit-packing cuts peak memory usage to roughly a third when Miri is memory-bound, and it also brings CPU performance improvements of varying size: not only do we simply access less memory, but because Item no longer has any padding, a Vec<Item> can now be compared with a memcmp.
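To make the layout concrete, here is a minimal, standalone sketch of the packing scheme (plain u64 values rather than Miri's SbTag and Permission types; the authoritative definition is the `mod item` hunk further down):

// Bit layout implied by the masks in this commit:
//   bits 0..=60  -> SbTag value (must fit in 61 bits)
//   bits 61..=62 -> Permission discriminant
//   bit  63      -> "is protected" flag
const TAG_MASK: u64 = u64::MAX >> 3;
const PERM_SHIFT: u64 = 61;
const PROTECTED_SHIFT: u64 = 63;

fn pack(tag: u64, perm: u64, protected: bool) -> u64 {
    assert!(tag <= TAG_MASK && perm <= 0b11);
    tag | (perm << PERM_SHIFT) | ((protected as u64) << PROTECTED_SHIFT)
}

fn main() {
    // 0b10 stands in for one of the four Permission discriminants.
    let item = pack(42, 0b10, true);
    assert_eq!(item & TAG_MASK, 42);
    assert_eq!((item >> PERM_SHIFT) & 0b11, 0b10);
    assert_eq!(item >> PROTECTED_SHIFT, 1);
    println!("packed item = {item:#018x}");
}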
parent 6e106617f1
commit afa1dddcf9
@@ -54,15 +54,18 @@ pub struct FrameData<'tcx> {
     /// for the start of this frame. When we finish executing this frame,
     /// we use this to register a completed event with `measureme`.
     pub timing: Option<measureme::DetachedTiming>,
+
+    pub protected_tags: Vec<SbTag>,
 }

 impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Omitting `timing`, it does not support `Debug`.
-        let FrameData { call_id, catch_unwind, timing: _ } = self;
+        let FrameData { call_id, catch_unwind, timing: _, protected_tags } = self;
         f.debug_struct("FrameData")
             .field("call_id", call_id)
             .field("catch_unwind", catch_unwind)
+            .field("protected_tags", protected_tags)
             .finish()
     }
 }
@@ -788,6 +791,7 @@ fn memory_read(
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
                 machine.current_span(),
+                &machine.threads,
             )?;
         }
         if let Some(weak_memory) = &alloc_extra.weak_memory {

@@ -819,6 +823,7 @@ fn memory_written(
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
                 machine.current_span(),
+                &machine.threads,
             )?;
         }
         if let Some(weak_memory) = &alloc_extra.weak_memory {

@@ -852,6 +857,7 @@ fn memory_deallocated(
                 tag,
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
+                &machine.threads,
             )
         } else {
             Ok(())

@@ -892,7 +898,7 @@ fn init_frame_extra(
             stacked_borrows.borrow_mut().new_call()
         });

-        let extra = FrameData { call_id, catch_unwind: None, timing };
+        let extra = FrameData { call_id, catch_unwind: None, timing, protected_tags: Vec::new() };
         Ok(frame.with_extra(extra))
     }

@@ -936,7 +942,7 @@ fn after_stack_pop(
     ) -> InterpResult<'tcx, StackPopJump> {
         let timing = frame.extra.timing.take();
         if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
-            stacked_borrows.borrow_mut().end_call(frame.extra.call_id);
+            stacked_borrows.borrow_mut().end_call(&frame.extra);
         }
         let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
         if let Some(profiler) = ecx.machine.profiler.as_ref() {
@@ -92,28 +92,100 @@ pub enum Permission {
     Disabled,
 }

-/// An item in the per-location borrow stack.
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct Item {
-    /// The permission this item grants.
-    perm: Permission,
-    /// The pointers the permission is granted to.
-    tag: SbTag,
-    /// An optional protector, ensuring the item cannot get popped until `CallId` is over.
-    protector: Option<CallId>,
-}
-
-impl fmt::Debug for Item {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "[{:?} for {:?}", self.perm, self.tag)?;
-        if let Some(call) = self.protector {
-            write!(f, " (call {})", call)?;
-        }
-        write!(f, "]")?;
-        Ok(())
-    }
-}
+impl Permission {
+    const UNIQUE: u64 = 0;
+    const SHARED_READ_WRITE: u64 = 1;
+    const SHARED_READ_ONLY: u64 = 2;
+    const DISABLED: u64 = 3;
+
+    fn to_bits(self) -> u64 {
+        match self {
+            Permission::Unique => Self::UNIQUE,
+            Permission::SharedReadWrite => Self::SHARED_READ_WRITE,
+            Permission::SharedReadOnly => Self::SHARED_READ_ONLY,
+            Permission::Disabled => Self::DISABLED,
+        }
+    }
+
+    fn from_bits(perm: u64) -> Self {
+        match perm {
+            Self::UNIQUE => Permission::Unique,
+            Self::SHARED_READ_WRITE => Permission::SharedReadWrite,
+            Self::SHARED_READ_ONLY => Permission::SharedReadOnly,
+            Self::DISABLED => Permission::Disabled,
+            _ => unreachable!(),
+        }
+    }
+}
+
+mod item {
+    use super::{Permission, SbTag};
+    use std::fmt;
+    use std::num::NonZeroU64;
+
+    /// An item in the per-location borrow stack.
+    #[derive(Copy, Clone, Hash, PartialEq, Eq)]
+    pub struct Item(u64);
+
+    // An Item contains 3 bitfields:
+    // * Bits 0-61 store an SbTag
+    // * Bits 61-63 store a Permission
+    // * Bit 64 stores a flag which indicates if we have a protector
+    const TAG_MASK: u64 = u64::MAX >> 3;
+    const PERM_MASK: u64 = 0x3 << 61;
+    const PROTECTED_MASK: u64 = 0x1 << 63;
+
+    const PERM_SHIFT: u64 = 61;
+    const PROTECTED_SHIFT: u64 = 63;
+
+    impl Item {
+        pub fn new(tag: SbTag, perm: Permission, protected: bool) -> Self {
+            assert!(tag.0.get() <= TAG_MASK);
+            let packed_tag = tag.0.get();
+            let packed_perm = perm.to_bits() << PERM_SHIFT;
+            let packed_protected = (protected as u64) << PROTECTED_SHIFT;
+
+            let new = Self(packed_tag | packed_perm | packed_protected);
+
+            debug_assert!(new.tag() == tag);
+            debug_assert!(new.perm() == perm);
+            debug_assert!(new.protected() == protected);
+
+            new
+        }
+
+        /// The pointers the permission is granted to.
+        pub fn tag(self) -> SbTag {
+            SbTag(NonZeroU64::new(self.0 & TAG_MASK).unwrap())
+        }
+
+        /// The permission this item grants.
+        pub fn perm(self) -> Permission {
+            Permission::from_bits((self.0 & PERM_MASK) >> PERM_SHIFT)
+        }
+
+        /// Whether or not there is a protector for this tag
+        pub fn protected(self) -> bool {
+            self.0 & PROTECTED_MASK > 0
+        }
+
+        /// Set the Permission stored in this Item to Permission::Disabled
+        pub fn set_disabled(&mut self) {
+            // Clear the current set permission
+            self.0 &= !PERM_MASK;
+            // Write Permission::Disabled to the Permission bits
+            self.0 |= Permission::Disabled.to_bits() << PERM_SHIFT;
+        }
+    }
+
+    impl fmt::Debug for Item {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            write!(f, "[{:?} for {:?}]", self.perm(), self.tag())
+        }
+    }
+}
+pub use item::Item;

 /// Extra per-allocation state.
 #[derive(Clone, Debug)]
 pub struct Stacks {
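For illustration only (this is not a test from the commit), a round trip through the accessors defined in the hunk above behaves roughly like this; SbTag is assumed to be the NonZeroU64 newtype used throughout this file:

// Hypothetical usage of the new packed Item.
let tag = SbTag(NonZeroU64::new(1234).unwrap());
let item = Item::new(tag, Permission::SharedReadWrite, /* protected */ false);
assert_eq!(item.tag(), tag);
assert_eq!(item.perm(), Permission::SharedReadWrite);
assert!(!item.protected());
// The whole Item is now a single u64: 8 bytes instead of the old 24-byte struct.
assert_eq!(std::mem::size_of::<Item>(), 8);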
@@ -136,8 +208,8 @@ pub struct GlobalStateInner {
     base_ptr_tags: FxHashMap<AllocId, SbTag>,
     /// Next unused call ID (for protectors).
     next_call_id: CallId,
-    /// Those call IDs corresponding to functions that are still running.
-    active_calls: FxHashSet<CallId>,
+    /// All tags currently protected
+    protected_tags: FxHashSet<SbTag>,
     /// The pointer ids to trace
     tracked_pointer_tags: HashSet<SbTag>,
     /// The call ids to trace

@@ -201,7 +273,7 @@ pub fn new(
             next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
             base_ptr_tags: FxHashMap::default(),
             next_call_id: NonZeroU64::new(1).unwrap(),
-            active_calls: FxHashSet::default(),
+            protected_tags: FxHashSet::default(),
             tracked_pointer_tags,
             tracked_call_ids,
             retag_fields,

@@ -221,17 +293,14 @@ pub fn new_call(&mut self) -> CallId {
         if self.tracked_call_ids.contains(&id) {
             register_diagnostic(NonHaltingDiagnostic::CreatedCallId(id));
         }
-        assert!(self.active_calls.insert(id));
         self.next_call_id = NonZeroU64::new(id.get() + 1).unwrap();
         id
     }

-    pub fn end_call(&mut self, id: CallId) {
-        assert!(self.active_calls.remove(&id));
-    }
-
-    fn is_active(&self, id: CallId) -> bool {
-        self.active_calls.contains(&id)
+    pub fn end_call(&mut self, frame: &machine::FrameData<'_>) {
+        for tag in &frame.protected_tags {
+            self.protected_tags.remove(tag);
+        }
     }

     pub fn base_ptr_tag(&mut self, id: AllocId) -> SbTag {
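Putting the pieces together (the end_call hunk above, plus the reborrow and item_popped hunks elsewhere in this commit), the new protector bookkeeping can be modelled by the following standalone sketch. The types and names here are simplified stand-ins with plain u64 tags instead of SbTag; they are not the commit's actual API.

use std::collections::HashSet;

struct Frame { protected_tags: Vec<u64> }
struct Global { protected_tags: HashSet<u64> }

impl Global {
    // What `reborrow` now does for a protected retag: record the tag in the
    // current frame (for cleanup) and in the global set (for fast lookups).
    fn protect(&mut self, frame: &mut Frame, tag: u64) {
        frame.protected_tags.push(tag);
        self.protected_tags.insert(tag);
    }

    // What `end_call` now does when the frame is popped: drop exactly the
    // tags this frame protected.
    fn end_call(&mut self, frame: &Frame) {
        for tag in &frame.protected_tags {
            self.protected_tags.remove(tag);
        }
    }

    // What `item_popped` consults instead of the old `is_active(CallId)` check.
    fn is_protected(&self, tag: u64) -> bool {
        self.protected_tags.contains(&tag)
    }
}

fn main() {
    let mut global = Global { protected_tags: HashSet::new() };
    let mut frame = Frame { protected_tags: Vec::new() };
    global.protect(&mut frame, 42);
    assert!(global.is_protected(42));
    global.end_call(&frame);
    assert!(!global.is_protected(42));
}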
@@ -287,7 +356,7 @@ impl<'tcx> Stack {
     /// Find the first write-incompatible item above the given one --
     /// i.e, find the height to which the stack will be truncated when writing to `granting`.
     fn find_first_write_incompatible(&self, granting: usize) -> usize {
-        let perm = self.get(granting).unwrap().perm;
+        let perm = self.get(granting).unwrap().perm();
         match perm {
             Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
             Permission::Disabled => bug!("Cannot use Disabled for anything"),

@@ -299,7 +368,7 @@ fn find_first_write_incompatible(&self, granting: usize) -> usize {
                 // The SharedReadWrite *just* above us are compatible, to skip those.
                 let mut idx = granting + 1;
                 while let Some(item) = self.get(idx) {
-                    if item.perm == Permission::SharedReadWrite {
+                    if item.perm() == Permission::SharedReadWrite {
                         // Go on.
                         idx += 1;
                     } else {

@@ -325,32 +394,44 @@ fn item_popped(
         provoking_access: Option<(SbTagExtra, AllocRange, Size, AccessKind)>, // just for debug printing and error messages
         global: &GlobalStateInner,
         alloc_history: &mut AllocHistory,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
-        if global.tracked_pointer_tags.contains(&item.tag) {
+        if global.tracked_pointer_tags.contains(&item.tag()) {
             register_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(
                 *item,
                 provoking_access.map(|(tag, _alloc_range, _size, access)| (tag, access)),
             ));
         }

-        if let Some(call) = item.protector {
-            if global.is_active(call) {
-                if let Some((tag, _alloc_range, _offset, _access)) = provoking_access {
-                    Err(err_sb_ub(
-                        format!(
-                            "not granting access to tag {:?} because incompatible item is protected: {:?}",
-                            tag, item
-                        ),
-                        None,
-                        tag.and_then(|tag| alloc_history.get_logs_relevant_to(tag, Some(item.tag))),
-                    ))?
-                } else {
-                    Err(err_sb_ub(
-                        format!("deallocating while item is protected: {:?}", item),
-                        None,
-                        None,
-                    ))?
-                }
+        if !item.protected() {
+            return Ok(());
+        }
+
+        if global.protected_tags.contains(&item.tag()) {
+            let call_id = threads
+                .all_stacks()
+                .flatten()
+                .find(|t| t.extra.protected_tags.contains(&item.tag()))
+                .map(|frame| frame.extra.call_id)
+                .unwrap(); // FIXME: Surely we should find something, but a panic seems wrong here?
+            if let Some((tag, _alloc_range, _offset, _access)) = provoking_access {
+                Err(err_sb_ub(
+                    format!(
+                        "not granting access to tag {:?} because incompatible item is protected: {:?} (call {:?})",
+                        tag, item, call_id
+                    ),
+                    None,
+                    tag.and_then(|tag| alloc_history.get_logs_relevant_to(tag, Some(item.tag()))),
+                ))?
+            } else {
+                Err(err_sb_ub(
+                    format!(
+                        "deallocating while item is protected: {:?} (call {:?})",
+                        item, call_id
+                    ),
+                    None,
+                    None,
+                ))?
             }
         }
         Ok(())
@@ -369,6 +450,7 @@ fn access(
         current_span: &mut CurrentSpan<'_, '_, 'tcx>,
         alloc_history: &mut AllocHistory,
         exposed_tags: &FxHashSet<SbTag>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         // Two main steps: Find granting item, remove incompatible items above.

@@ -399,8 +481,9 @@ fn access(
                     Some((tag, alloc_range, offset, access)),
                     global,
                     alloc_history,
+                    threads,
                 )?;
-                alloc_history.log_invalidation(item.tag, alloc_range, current_span);
+                alloc_history.log_invalidation(item.tag(), alloc_range, current_span);
                 Ok(())
             })?;
         } else {

@@ -425,8 +508,9 @@ fn access(
                     Some((tag, alloc_range, offset, access)),
                     global,
                     alloc_history,
+                    threads,
                 )?;
-                alloc_history.log_invalidation(item.tag, alloc_range, current_span);
+                alloc_history.log_invalidation(item.tag(), alloc_range, current_span);
                 Ok(())
             })?;
         }

@@ -439,9 +523,9 @@ fn access(
             for i in 0..self.len() {
                 let item = self.get(i).unwrap();
                 // Skip disabled items, they cannot be matched anyway.
-                if !matches!(item.perm, Permission::Disabled) {
+                if !matches!(item.perm(), Permission::Disabled) {
                     // We are looking for a strict upper bound, so add 1 to this tag.
-                    max = cmp::max(item.tag.0.checked_add(1).unwrap(), max);
+                    max = cmp::max(item.tag().0.checked_add(1).unwrap(), max);
                 }
             }
             if let Some(unk) = self.unknown_bottom() {

@@ -467,6 +551,7 @@ fn dealloc(
         global: &GlobalStateInner,
         alloc_history: &mut AllocHistory,
         exposed_tags: &FxHashSet<SbTag>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         // Step 1: Make sure there is a granting item.
         self.find_granting(AccessKind::Write, tag, exposed_tags).map_err(|_| {

@@ -482,7 +567,7 @@ fn dealloc(
         // Step 2: Consider all items removed. This checks for protectors.
         for idx in (0..self.len()).rev() {
             let item = self.get(idx).unwrap();
-            Stack::item_popped(&item, None, global, alloc_history)?;
+            Stack::item_popped(&item, None, global, alloc_history, threads)?;
         }
         Ok(())
     }

@@ -502,10 +587,11 @@ fn grant(
         current_span: &mut CurrentSpan<'_, '_, 'tcx>,
         alloc_history: &mut AllocHistory,
         exposed_tags: &FxHashSet<SbTag>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         // Figure out which access `perm` corresponds to.
         let access =
-            if new.perm.grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
+            if new.perm().grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };

         // Now we figure out which item grants our parent (`derived_from`) this kind of access.
         // We use that to determine where to put the new item.

@@ -517,7 +603,7 @@ fn grant(
         // Compute where to put the new item.
         // Either way, we ensure that we insert the new item in a way such that between
         // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
-        let new_idx = if new.perm == Permission::SharedReadWrite {
+        let new_idx = if new.perm() == Permission::SharedReadWrite {
             assert!(
                 access == AccessKind::Write,
                 "this case only makes sense for stack-like accesses"

@@ -550,6 +636,7 @@ fn grant(
                 current_span,
                 alloc_history,
                 exposed_tags,
+                threads,
             )?;

             // We insert "as far up as possible": We know only compatible items are remaining

@@ -571,7 +658,7 @@ fn grant(
 impl<'tcx> Stacks {
     /// Creates new stack with initial tag.
     fn new(size: Size, perm: Permission, tag: SbTag) -> Self {
-        let item = Item { perm, tag, protector: None };
+        let item = Item::new(tag, perm, false);
         let stack = Stack::new(item);

         Stacks {

@@ -637,6 +724,7 @@ pub fn memory_read<'tcx>(
         range: AllocRange,
         state: &GlobalState,
         mut current_span: CurrentSpan<'_, '_, 'tcx>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!(
             "read access with tag {:?}: {:?}, size {}",

@@ -654,6 +742,7 @@ pub fn memory_read<'tcx>(
                 &mut current_span,
                 history,
                 exposed_tags,
+                threads,
             )
         })
     }

@@ -666,6 +755,7 @@ pub fn memory_written<'tcx>(
         range: AllocRange,
         state: &GlobalState,
         mut current_span: CurrentSpan<'_, '_, 'tcx>,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!(
             "write access with tag {:?}: {:?}, size {}",

@@ -683,6 +773,7 @@ pub fn memory_written<'tcx>(
                 &mut current_span,
                 history,
                 exposed_tags,
+                threads,
             )
         })
     }

@@ -694,11 +785,12 @@ pub fn memory_deallocated<'tcx>(
         tag: SbTagExtra,
         range: AllocRange,
         state: &GlobalState,
+        threads: &ThreadManager<'_, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
         let state = state.borrow();
         self.for_each(range, |offset, stack, history, exposed_tags| {
-            stack.dealloc(tag, (alloc_id, range, offset), &state, history, exposed_tags)
+            stack.dealloc(tag, (alloc_id, range, offset), &state, history, exposed_tags, threads)
         })?;
         Ok(())
     }
@@ -801,7 +893,6 @@ fn reborrow(
             });
         }

-        let protector = if protect { Some(this.frame().extra.call_id) } else { None };
         trace!(
             "reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
             kind,

@@ -812,6 +903,13 @@ fn reborrow(
             size.bytes()
         );

+        if protect {
+            this.frame_mut().extra.protected_tags.push(new_tag);
+            this.machine.stacked_borrows.as_mut().unwrap().get_mut().protected_tags.insert(new_tag);
+        }
+        // FIXME: can't hold the current span handle across the borrows of self above
+        let current_span = &mut this.machine.current_span();
+
         // Update the stacks.
         // Make sure that raw pointers and mutable shared references are reborrowed "weak":
         // There could be existing unique pointers reborrowed from them that should remain valid!

@@ -848,15 +946,16 @@ fn reborrow(
                 } else {
                     Permission::SharedReadWrite
                 };
-                let protector = if frozen {
-                    protector
+                let protected = if frozen {
+                    protect
                 } else {
                     // We do not protect inside UnsafeCell.
                     // This fixes https://github.com/rust-lang/rust/issues/55005.
-                    None
+                    false
                 };
-                let item = Item { perm, tag: new_tag, protector };
+                let item = Item::new(new_tag, perm, protected);
                 let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
+                let threads = &this.machine.threads;
                 stacked_borrows.for_each(range, |offset, stack, history, exposed_tags| {
                     stack.grant(
                         orig_tag,

@@ -866,6 +965,7 @@ fn reborrow(
                         current_span,
                         history,
                         exposed_tags,
+                        threads,
                     )
                 })
             })?;

@@ -881,9 +981,10 @@ fn reborrow(
             .as_mut()
             .expect("we should have Stacked Borrows data")
             .borrow_mut();
-        let item = Item { perm, tag: new_tag, protector };
+        let item = Item::new(new_tag, perm, protect);
         let range = alloc_range(base_offset, size);
         let mut global = machine.stacked_borrows.as_ref().unwrap().borrow_mut();
+        let threads = &machine.threads;
         let current_span = &mut machine.current_span(); // `get_alloc_extra_mut` invalidated our old `current_span`
         stacked_borrows.for_each(range, |offset, stack, history, exposed_tags| {
             stack.grant(

@@ -894,6 +995,7 @@ fn reborrow(
                 current_span,
                 history,
                 exposed_tags,
+                threads,
             )
         })?;

@@ -141,7 +141,7 @@ pub fn grant_error<'tcx>(
 ) -> InterpError<'tcx> {
     let action = format!(
         "trying to reborrow {derived_from:?} for {new_perm:?} permission at {alloc_id:?}[{offset:#x}]",
-        new_perm = new.perm,
+        new_perm = new.perm(),
         offset = error_offset.bytes(),
     );
     err_sb_ub(

@@ -185,7 +185,7 @@ fn error_cause(stack: &Stack, tag: SbTagExtra) -> &'static str {
     if let SbTagExtra::Concrete(tag) = tag {
         if (0..stack.len())
             .map(|i| stack.get(i).unwrap())
-            .any(|item| item.tag == tag && item.perm != Permission::Disabled)
+            .any(|item| item.tag() == tag && item.perm() != Permission::Disabled)
         {
             ", but that tag only grants SharedReadOnly permission for this location"
         } else {
@@ -37,7 +37,7 @@ pub struct Stack {
 }

 /// A very small cache of searches of the borrow stack
-/// This maps tags to locations in the borrow stack. Any use of this still needs to do a
+/// This maps items to locations in the borrow stack. Any use of this still needs to do a
 /// probably-cold random access into the borrow stack to figure out what `Permission` an
 /// `SbTag` grants. We could avoid this by also storing the `Permission` in the cache, but
 /// most lookups into the cache are immediately followed by access of the full borrow stack anyway.

@@ -48,7 +48,7 @@ pub struct Stack {
 #[cfg(feature = "stack-cache")]
 #[derive(Clone, Debug)]
 struct StackCache {
-    tags: [SbTag; CACHE_LEN], // Hot in find_granting
+    items: [Item; CACHE_LEN], // Hot in find_granting
     idx: [usize; CACHE_LEN], // Hot in grant
 }

@@ -59,11 +59,11 @@ impl StackCache {
     /// We use the position in the cache to represent how recently a tag was used; the first position
     /// is the most recently used tag. So an add shifts every element towards the end, and inserts
     /// the new element at the start. We lose the last element.
-    /// This strategy is effective at keeping the most-accessed tags in the cache, but it costs a
+    /// This strategy is effective at keeping the most-accessed items in the cache, but it costs a
     /// linear shift across the entire cache when we add a new tag.
-    fn add(&mut self, idx: usize, tag: SbTag) {
-        self.tags.copy_within(0..CACHE_LEN - 1, 1);
-        self.tags[0] = tag;
+    fn add(&mut self, idx: usize, item: Item) {
+        self.items.copy_within(0..CACHE_LEN - 1, 1);
+        self.items[0] = item;
         self.idx.copy_within(0..CACHE_LEN - 1, 1);
         self.idx[0] = idx;
     }

@@ -80,20 +80,20 @@ impl Eq for Stack {}

 impl<'tcx> Stack {
     /// Panics if any of the caching mechanisms have broken,
-    /// - The StackCache indices don't refer to the parallel tags,
-    /// - There are no Unique tags outside of first_unique..last_unique
+    /// - The StackCache indices don't refer to the parallel items,
+    /// - There are no Unique items outside of first_unique..last_unique
     #[cfg(feature = "expensive-debug-assertions")]
     fn verify_cache_consistency(&self) {
         // Only a full cache needs to be valid. Also see the comments in find_granting_cache
         // and set_unknown_bottom.
         if self.borrows.len() >= CACHE_LEN {
-            for (tag, stack_idx) in self.cache.tags.iter().zip(self.cache.idx.iter()) {
-                assert_eq!(self.borrows[*stack_idx].tag, *tag);
+            for (tag, stack_idx) in self.cache.items.iter().zip(self.cache.idx.iter()) {
+                assert_eq!(self.borrows[*stack_idx], *tag);
             }
         }

         for (idx, item) in self.borrows.iter().enumerate() {
-            if item.perm == Permission::Unique {
+            if item.perm() == Permission::Unique {
                 assert!(
                     self.unique_range.contains(&idx),
                     "{:?} {:?}",

@@ -128,7 +128,7 @@ pub(super) fn find_granting(
                 .rev() // search top-to-bottom
                 .find_map(|(idx, item)| {
                     // If the item fits and *might* be this wildcard, use it.
-                    if item.perm.grants(access) && exposed_tags.contains(&item.tag) {
+                    if item.perm().grants(access) && exposed_tags.contains(&item.tag()) {
                         Some(idx)
                     } else {
                         None

@@ -161,9 +161,9 @@ fn find_granting_tagged(&mut self, access: AccessKind, tag: SbTag) -> Option<usi
         // If we didn't find the tag in the cache, fall back to a linear search of the
         // whole stack, and add the tag to the cache.
         for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
-            if tag == item.tag && item.perm.grants(access) {
+            if tag == item.tag() && item.perm().grants(access) {
                 #[cfg(feature = "stack-cache")]
-                self.cache.add(stack_idx, tag);
+                self.cache.add(stack_idx, *item);
                 return Some(stack_idx);
             }
         }

@@ -175,7 +175,7 @@ fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option<usiz
         // This looks like a common-sense optimization; we're going to do a linear search of the
         // cache or the borrow stack to scan the shorter of the two. This optimization is miniscule
         // and this check actually ensures we do not access an invalid cache.
-        // When a stack is created and when tags are removed from the top of the borrow stack, we
+        // When a stack is created and when items are removed from the top of the borrow stack, we
        // need some valid value to populate the cache. In both cases, we try to use the bottom
         // item. But when the stack is cleared in `set_unknown_bottom` there is nothing we could
         // place in the cache that is correct. But due to the way we populate the cache in

@@ -185,21 +185,23 @@ fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option<usiz
             return None;
         }
         // Search the cache for the tag we're looking up
-        let cache_idx = self.cache.tags.iter().position(|t| *t == tag)?;
+        let cache_idx = self.cache.items.iter().position(|t| t.tag() == tag)?;
         let stack_idx = self.cache.idx[cache_idx];
         // If we found the tag, look up its position in the stack to see if it grants
         // the required permission
-        if self.borrows[stack_idx].perm.grants(access) {
+        if self.cache.items[cache_idx].perm().grants(access) {
             // If it does, and it's not already in the most-recently-used position, re-insert it at
             // the most-recently-used position. This technically reduces the efficiency of the
             // cache by duplicating elements, but current benchmarks do not seem to benefit from
             // avoiding this duplication.
             // But if the tag is in position 1, avoiding the duplicating add is trivial.
+            // If it does, and it's not already in the most-recently-used position, move it there.
+            // Except if the tag is in position 1, this is equivalent to just a swap, so do that.
             if cache_idx == 1 {
-                self.cache.tags.swap(0, 1);
+                self.cache.items.swap(0, 1);
                 self.cache.idx.swap(0, 1);
             } else if cache_idx > 1 {
-                self.cache.add(stack_idx, tag);
+                self.cache.add(stack_idx, self.cache.items[cache_idx]);
             }
             Some(stack_idx)
         } else {

@@ -224,7 +226,7 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
         if self.unique_range.end >= new_idx {
             self.unique_range.end += 1;
         }
-        if new.perm == Permission::Unique {
+        if new.perm() == Permission::Unique {
             // Make sure the possibly-unique range contains the new borrow
             self.unique_range.start = self.unique_range.start.min(new_idx);
             self.unique_range.end = self.unique_range.end.max(new_idx + 1);

@@ -233,7 +235,7 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
         // The above insert changes the meaning of every index in the cache >= new_idx, so now
         // we need to find every one of those indexes and increment it.
         // But if the insert is at the end (equivalent to a push), we can skip this step because
-        // it didn't change the position of any other tags.
+        // it didn't change the position of any other items.
         if new_idx != self.borrows.len() - 1 {
             for idx in &mut self.cache.idx {
                 if *idx >= new_idx {

@@ -243,7 +245,7 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
         }

         // This primes the cache for the next access, which is almost always the just-added tag.
-        self.cache.add(new_idx, new.tag);
+        self.cache.add(new_idx, new);

         #[cfg(feature = "expensive-debug-assertions")]
         self.verify_cache_consistency();

@@ -255,9 +257,9 @@ pub fn new(item: Item) -> Self {
             borrows: vec![item],
             unknown_bottom: None,
             #[cfg(feature = "stack-cache")]
-            cache: StackCache { idx: [0; CACHE_LEN], tags: [item.tag; CACHE_LEN] },
+            cache: StackCache { idx: [0; CACHE_LEN], items: [item; CACHE_LEN] },
             #[cfg(feature = "stack-cache")]
-            unique_range: if item.perm == Permission::Unique { 0..1 } else { 0..0 },
+            unique_range: if item.perm() == Permission::Unique { 0..1 } else { 0..0 },
         }
     }

@@ -298,10 +300,15 @@ pub fn disable_uniques_starting_at<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
         let lower = unique_range.start.max(disable_start);
         let upper = (unique_range.end + 1).min(self.borrows.len());
         for item in &mut self.borrows[lower..upper] {
-            if item.perm == Permission::Unique {
+            if item.perm() == Permission::Unique {
                 log::trace!("access: disabling item {:?}", item);
                 visitor(*item)?;
-                item.perm = Permission::Disabled;
+                item.set_disabled();
+                for t in &mut self.cache.items {
+                    if t.tag() == item.tag() {
+                        t.set_disabled();
+                    }
+                }
             }
         }
     }

@@ -341,7 +348,7 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
         // also possible that the whole cache is still valid. So we call this method to repair what
         // aspects of the cache are now invalid, instead of resetting the whole thing to a trivially
         // valid default state.
-        let base_tag = self.borrows[0].tag;
+        let base_tag = self.borrows[0];
         let mut removed = 0;
         let mut cursor = 0;
         // Remove invalid entries from the cache by rotating them to the end of the cache, then

@@ -350,7 +357,7 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
         for _ in 0..CACHE_LEN - 1 {
             if self.cache.idx[cursor] >= start {
                 self.cache.idx[cursor..CACHE_LEN - removed].rotate_left(1);
-                self.cache.tags[cursor..CACHE_LEN - removed].rotate_left(1);
+                self.cache.items[cursor..CACHE_LEN - removed].rotate_left(1);
                 removed += 1;
             } else {
                 cursor += 1;

@@ -358,7 +365,7 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
         }
         for i in CACHE_LEN - removed - 1..CACHE_LEN {
             self.cache.idx[i] = 0;
-            self.cache.tags[i] = base_tag;
+            self.cache.items[i] = base_tag;
         }

         if start < self.unique_range.start.saturating_sub(1) {
@@ -281,6 +281,10 @@ fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameDa
         &mut self.threads[self.active_thread].stack
     }

+    pub fn all_stacks(&self) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>]> {
+        self.threads.iter().map(|t| &t.stack[..])
+    }
+
     /// Create a new thread and returns its id.
     fn create_thread(&mut self) -> ThreadId {
         let new_thread_id = ThreadId::new(self.threads.len());
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
   --> $DIR/aliasing_mut1.rs:LL:CC
    |
 LL | pub fn safe(_x: &mut i32, _y: &mut i32) {}
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG>] (call ID)
   --> $DIR/aliasing_mut2.rs:LL:CC
    |
 LL | pub fn safe(_x: &i32, _y: &mut i32) {}
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG>] (call ID)
   --> $DIR/aliasing_mut4.rs:LL:CC
    |
 LL | pub fn safe(_x: &i32, _y: &mut Cell<i32>) {}
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: deallocating while item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: deallocating while item is protected: [Unique for <TAG>] (call ID)
   --> RUSTLIB/alloc/src/alloc.rs:LL:CC
    |
 LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [Unique for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: deallocating while item is protected: [SharedReadWrite for <TAG> (call ID)]
+error: Undefined Behavior: deallocating while item is protected: [SharedReadWrite for <TAG>] (call ID)
   --> RUSTLIB/alloc/src/alloc.rs:LL:CC
    |
 LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [SharedReadWrite for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [SharedReadWrite for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
   --> $DIR/illegal_write6.rs:LL:CC
    |
 LL | unsafe { *y = 2 };
-   | ^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
   --> $DIR/invalidate_against_barrier1.rs:LL:CC
    |
 LL | let _val = unsafe { *x };
-   | ^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG>] (call ID)
   --> $DIR/invalidate_against_barrier2.rs:LL:CC
    |
 LL | unsafe { *x = 0 };
-   | ^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG> (call ID)]
+   | ^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [SharedReadOnly for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+error: Undefined Behavior: not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
   --> RUSTLIB/alloc/src/boxed.rs:LL:CC
    |
 LL | Box(unsafe { Unique::new_unchecked(raw) }, alloc)
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG> (call ID)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because incompatible item is protected: [Unique for <TAG>] (call ID)
    |
    = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
    = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information