diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs
index 8fbee9a3522..dacb3a9b88f 100644
--- a/src/tools/miri/src/concurrency/thread.rs
+++ b/src/tools/miri/src/concurrency/thread.rs
@@ -118,6 +118,13 @@ pub struct Thread<'mir, 'tcx> {
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,
+ /// The index of the topmost user-relevant frame in `stack`. This field must contain
+ /// the value produced by `compute_top_user_relevant_frame`; the `None` state represents
+ /// a stack with no user-relevant frame on it.
+ /// This field is a cache to reduce how often we call that method. The cache is manually
+ /// maintained inside `MiriMachine::after_stack_push` and `MiriMachine::after_stack_pop`.
+ top_user_relevant_frame: Option<usize>,
+
/// The join status.
join_status: ThreadJoinStatus,
@@ -147,6 +154,40 @@ fn check_terminated(&mut self) -> bool {
fn thread_name(&self) -> &[u8] {
if let Some(ref thread_name) = self.thread_name { thread_name } else { b"" }
}
+
+ /// Return the top user-relevant frame, if there is one.
+ /// Note that the choice to return `None` here when there is no user-relevant frame is part of
+ /// justifying the optimization that only pushes of user-relevant frames require updating the
+ /// `top_user_relevant_frame` field.
+ fn compute_top_user_relevant_frame(&self) -> Option<usize> {
+ self.stack
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(idx, frame)| if frame.extra.is_user_relevant { Some(idx) } else { None })
+ }
+
+ /// Re-compute the top user-relevant frame from scratch.
+ pub fn recompute_top_user_relevant_frame(&mut self) {
+ self.top_user_relevant_frame = self.compute_top_user_relevant_frame();
+ }
+
+ /// Set the top user-relevant frame to the given value. Must be equal to what
+ /// `compute_top_user_relevant_frame` would return!
+ pub fn set_top_user_relevant_frame(&mut self, frame_idx: usize) {
+ debug_assert_eq!(Some(frame_idx), self.compute_top_user_relevant_frame());
+ self.top_user_relevant_frame = Some(frame_idx);
+ }
+
+ /// Returns the topmost frame that is considered user-relevant, or the
+ /// top of the stack if there is no such frame, or `None` if the stack is empty.
+ pub fn top_user_relevant_frame(&self) -> Option<usize> {
+ debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame());
+ // This can be called upon creation of an allocation. We create allocations while setting up
+ // parts of the Rust runtime when we do not have any stack frames yet, so we need to handle
+ // empty stacks.
+ self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
+ }
}
impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
@@ -167,6 +208,7 @@ fn default() -> Self {
state: ThreadState::Enabled,
thread_name: None,
stack: Vec::new(),
+ top_user_relevant_frame: None,
join_status: ThreadJoinStatus::Joinable,
panic_payload: None,
last_error: None,
@@ -184,8 +226,15 @@ fn new(name: &str) -> Self {
impl VisitTags for Thread<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
- let Thread { panic_payload, last_error, stack, state: _, thread_name: _, join_status: _ } =
- self;
+ let Thread {
+ panic_payload,
+ last_error,
+ stack,
+ top_user_relevant_frame: _,
+ state: _,
+ thread_name: _,
+ join_status: _,
+ } = self;
panic_payload.visit_tags(visit);
last_error.visit_tags(visit);
@@ -414,7 +463,7 @@ fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
}
/// Get a shared borrow of the currently active thread.
- fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
+ pub fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
&self.threads[self.active_thread]
}
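Note: the thread.rs hunks above introduce the cached `top_user_relevant_frame` index and its invariant (it must always equal what `compute_top_user_relevant_frame` would return, checked via `debug_assert_eq!`). The following standalone sketch illustrates that caching discipline in isolation; `MiniFrame`, `MiniStack`, and the `main` driver are invented for this illustration and are not part of the patch.

```rust
/// Minimal stand-ins for a stack of frames with a cached "topmost relevant" index.
struct MiniFrame {
    is_user_relevant: bool,
}

struct MiniStack {
    frames: Vec<MiniFrame>,
    /// Cached index of the topmost user-relevant frame, if any.
    top_user_relevant_frame: Option<usize>,
}

impl MiniStack {
    /// The "from scratch" computation the cache must agree with.
    fn compute_top_user_relevant_frame(&self) -> Option<usize> {
        self.frames.iter().rposition(|f| f.is_user_relevant)
    }

    fn push(&mut self, frame: MiniFrame) {
        let is_relevant = frame.is_user_relevant;
        self.frames.push(frame);
        if is_relevant {
            // A user-relevant push is necessarily the new topmost user-relevant frame.
            self.top_user_relevant_frame = Some(self.frames.len() - 1);
        }
        // Non-relevant pushes leave the cache untouched: the old answer is still correct.
        debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame());
    }

    fn pop(&mut self) -> Option<MiniFrame> {
        let frame = self.frames.pop()?;
        if frame.is_user_relevant {
            // We lost the frame the cache pointed at, so recompute from scratch.
            self.top_user_relevant_frame = self.compute_top_user_relevant_frame();
        }
        debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame());
        Some(frame)
    }
}

fn main() {
    let mut stack = MiniStack { frames: Vec::new(), top_user_relevant_frame: None };
    stack.push(MiniFrame { is_user_relevant: true });
    stack.push(MiniFrame { is_user_relevant: false });
    assert_eq!(stack.top_user_relevant_frame, Some(0));
    stack.pop();
    stack.pop();
    assert_eq!(stack.top_user_relevant_frame, None);
}
```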
diff --git a/src/tools/miri/src/helpers.rs b/src/tools/miri/src/helpers.rs
index cd5e989b434..8c7bc9eff00 100644
--- a/src/tools/miri/src/helpers.rs
+++ b/src/tools/miri/src/helpers.rs
@@ -936,31 +936,14 @@ fn item_link_name(&self, def_id: DefId) -> Symbol {
}
impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
- pub fn current_span(&self) -> CurrentSpan<'_, 'mir, 'tcx> {
- CurrentSpan { current_frame_idx: None, machine: self }
- }
-}
-
-/// A `CurrentSpan` should be created infrequently (ideally once) per interpreter step. It does
-/// nothing on creation, but when `CurrentSpan::get` is called, searches the current stack for the
-/// topmost frame which corresponds to a local crate, and returns the current span in that frame.
-/// The result of that search is cached so that later calls are approximately free.
-#[derive(Clone)]
-pub struct CurrentSpan<'a, 'mir, 'tcx> {
- current_frame_idx: Option<usize>,
- machine: &'a MiriMachine<'mir, 'tcx>,
-}
-
-impl<'a, 'mir: 'a, 'tcx: 'a + 'mir> CurrentSpan<'a, 'mir, 'tcx> {
- pub fn machine(&self) -> &'a MiriMachine<'mir, 'tcx> {
- self.machine
- }
-
- /// Get the current span, skipping non-local frames.
+ /// Get the current span in the topmost function which is workspace-local and not
+ /// `#[track_caller]`.
/// This function is backed by a cache, and can be assumed to be very fast.
- pub fn get(&mut self) -> Span {
- let idx = self.current_frame_idx();
- self.stack().get(idx).map(Frame::current_span).unwrap_or(rustc_span::DUMMY_SP)
+ /// It will work even when the stack is empty.
+ pub fn current_span(&self) -> Span {
+ self.top_user_relevant_frame()
+ .map(|frame_idx| self.stack()[frame_idx].current_span())
+ .unwrap_or(rustc_span::DUMMY_SP)
}
/// Returns the span of the *caller* of the current operation, again
@@ -968,46 +951,27 @@ pub fn get(&mut self) -> Span {
/// current operation is not in a local crate.
/// This is useful when we are processing something which occurs on function-entry and we want
/// to point at the call to the function, not the function definition generally.
- pub fn get_caller(&mut self) -> Span {
+ pub fn caller_span(&self) -> Span {
// We need to go down at least to the caller (len - 2), or however
- // far we have to go to find a frame in a local crate.
- let local_frame_idx = self.current_frame_idx();
- let stack = self.stack();
- let idx = cmp::min(local_frame_idx, stack.len().saturating_sub(2));
- stack.get(idx).map(Frame::current_span).unwrap_or(rustc_span::DUMMY_SP)
+ // far we have to go to find a frame in a local crate which is also not #[track_caller].
+ let frame_idx = self.top_user_relevant_frame().unwrap();
+ let frame_idx = cmp::min(frame_idx, self.stack().len().checked_sub(2).unwrap());
+ self.stack()[frame_idx].current_span()
}
fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameData<'tcx>>] {
- self.machine.threads.active_thread_stack()
+ self.threads.active_thread_stack()
}
- fn current_frame_idx(&mut self) -> usize {
- *self
- .current_frame_idx
- .get_or_insert_with(|| Self::compute_current_frame_index(self.machine))
+ fn top_user_relevant_frame(&self) -> Option<usize> {
+ self.threads.active_thread_ref().top_user_relevant_frame()
}
- // Find the position of the inner-most frame which is part of the crate being
- // compiled/executed, part of the Cargo workspace, and is also not #[track_caller].
- #[inline(never)]
- fn compute_current_frame_index(machine: &MiriMachine<'_, '_>) -> usize {
- machine
- .threads
- .active_thread_stack()
- .iter()
- .enumerate()
- .rev()
- .find_map(|(idx, frame)| {
- let def_id = frame.instance.def_id();
- if (def_id.is_local() || machine.local_crates.contains(&def_id.krate))
- && !frame.instance.def.requires_caller_location(machine.tcx)
- {
- Some(idx)
- } else {
- None
- }
- })
- .unwrap_or(0)
+ /// This is the source of truth for the `is_user_relevant` flag in our `FrameData`.
+ pub fn is_user_relevant(&self, frame: &Frame<'mir, 'tcx, Provenance>) -> bool {
+ let def_id = frame.instance.def_id();
+ (def_id.is_local() || self.local_crates.contains(&def_id.krate))
+ && !frame.instance.def.requires_caller_location(self.tcx)
}
}
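Note: the new `caller_span` keeps the `cmp::min(frame_idx, len - 2)` logic from the old `get_caller`: go down at least one frame from the top of the stack, but never below the topmost user-relevant frame. A tiny sketch of just that index math; the function name `caller_index` and the inputs are made up for the illustration.

```rust
use std::cmp;

/// Pick the frame whose span `caller_span` would report, given the cached index
/// of the topmost user-relevant frame and the current stack depth.
fn caller_index(top_user_relevant: usize, stack_len: usize) -> usize {
    cmp::min(top_user_relevant, stack_len.checked_sub(2).expect("need at least two frames"))
}

fn main() {
    // Topmost user-relevant frame is the top of a 5-frame stack: the caller is frame 3.
    assert_eq!(caller_index(4, 5), 3);
    // Topmost user-relevant frame is deep in the stack: point at it, not at frame 3.
    assert_eq!(caller_index(1, 5), 1);
}
```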
diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs
index 66df0d737c0..8913f8aa10f 100644
--- a/src/tools/miri/src/lib.rs
+++ b/src/tools/miri/src/lib.rs
@@ -97,7 +97,7 @@
pub use crate::eval::{
create_ecx, eval_entry, AlignmentCheck, BacktraceStyle, IsolatedOp, MiriConfig, RejectOpWith,
};
-pub use crate::helpers::{CurrentSpan, EvalContextExt as _};
+pub use crate::helpers::EvalContextExt as _;
pub use crate::intptrcast::ProvenanceMode;
pub use crate::machine::{
AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind,
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 8243ccd90a3..b7f434b5557 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -50,12 +50,18 @@ pub struct FrameData<'tcx> {
/// for the start of this frame. When we finish executing this frame,
/// we use this to register a completed event with `measureme`.
pub timing: Option<measureme::DetachedTiming>,
+
+ /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
+ /// `#[track_caller]`. We compute this once on creation and store the result, as an
+ /// optimization.
+ /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
+ pub is_user_relevant: bool,
}
impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Omitting `timing`, it does not support `Debug`.
- let FrameData { stacked_borrows, catch_unwind, timing: _ } = self;
+ let FrameData { stacked_borrows, catch_unwind, timing: _, is_user_relevant: _ } = self;
f.debug_struct("FrameData")
.field("stacked_borrows", stacked_borrows)
.field("catch_unwind", catch_unwind)
@@ -65,7 +71,7 @@ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl VisitTags for FrameData<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
- let FrameData { catch_unwind, stacked_borrows, timing: _ } = self;
+ let FrameData { catch_unwind, stacked_borrows, timing: _, is_user_relevant: _ } = self;
catch_unwind.visit_tags(visit);
stacked_borrows.visit_tags(visit);
@@ -895,13 +901,7 @@ fn adjust_allocation<'b>(
let alloc = alloc.into_owned();
let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| {
- Stacks::new_allocation(
- id,
- alloc.size(),
- stacked_borrows,
- kind,
- ecx.machine.current_span(),
- )
+ Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind, &ecx.machine)
});
let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| {
data_race::AllocExtra::new_allocation(
@@ -1016,8 +1016,7 @@ fn before_memory_read(
prov_extra,
range,
machine.stacked_borrows.as_ref().unwrap(),
- machine.current_span(),
- &machine.threads,
+ machine,
)?;
}
if let Some(weak_memory) = &alloc_extra.weak_memory {
@@ -1048,8 +1047,7 @@ fn before_memory_write(
prov_extra,
range,
machine.stacked_borrows.as_ref().unwrap(),
- machine.current_span(),
- &machine.threads,
+ machine,
)?;
}
if let Some(weak_memory) = &alloc_extra.weak_memory {
@@ -1083,8 +1081,7 @@ fn before_memory_deallocation(
prove_extra,
range,
machine.stacked_borrows.as_ref().unwrap(),
- machine.current_span(),
- &machine.threads,
+ machine,
)
} else {
Ok(())
@@ -1126,7 +1123,9 @@ fn init_frame_extra(
stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame(&ecx.machine)),
catch_unwind: None,
timing,
+ is_user_relevant: ecx.machine.is_user_relevant(&frame),
};
+
Ok(frame.with_extra(extra))
}
@@ -1174,6 +1173,13 @@ fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
#[inline(always)]
fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ if ecx.frame().extra.is_user_relevant {
+ // We just pushed a user-relevant frame, so it is necessarily the topmost user-relevant
+ // frame. If we push a frame that is not user-relevant, the cached value stays correct.
+ let stack_len = ecx.active_thread_stack().len();
+ ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
+ }
+
if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
}
@@ -1183,6 +1189,13 @@ fn after_stack_pop(
mut frame: Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>,
unwinding: bool,
) -> InterpResult<'tcx, StackPopJump> {
+ if frame.extra.is_user_relevant {
+ // All we track per frame is whether it is user-relevant, so once the topmost
+ // user-relevant frame is removed we no longer know where the next one is; recompute it.
+ // (If this ever becomes a bottleneck, we could have `push` store the previous
+ // user-relevant frame and restore that here.)
+ ecx.active_thread_mut().recompute_top_user_relevant_frame();
+ }
let timing = frame.extra.timing.take();
if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
stacked_borrows.borrow_mut().end_call(&frame.extra);
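Note: both `Thread::visit_tags` and the `FrameData` `Debug`/`visit_tags` impls above destructure the struct exhaustively, which is why adding `top_user_relevant_frame` and `is_user_relevant` forced those spots to be updated. A minimal sketch of that pattern with an invented `Example` type:

```rust
struct Example {
    used: u32,
    ignored: bool,
}

fn visit(e: &Example) -> u32 {
    // Naming every field (ignored ones with `_`) means this stops compiling as soon as
    // `Example` grows a field, unlike plain field access via `e.used`.
    let Example { used, ignored: _ } = e;
    *used
}

fn main() {
    assert_eq!(visit(&Example { used: 3, ignored: false }), 3);
}
```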
diff --git a/src/tools/miri/src/stacked_borrows/diagnostics.rs b/src/tools/miri/src/stacked_borrows/diagnostics.rs
index f307bf11edd..662d8ada735 100644
--- a/src/tools/miri/src/stacked_borrows/diagnostics.rs
+++ b/src/tools/miri/src/stacked_borrows/diagnostics.rs
@@ -5,7 +5,6 @@
use rustc_span::{Span, SpanData};
use rustc_target::abi::Size;
-use crate::helpers::CurrentSpan;
use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind};
use crate::*;
@@ -110,42 +109,29 @@ pub struct TagHistory {
pub protected: Option<(String, SpanData)>,
}
-pub struct DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
+pub struct DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
operation: Operation,
- // 'span cannot be merged with any other lifetime since they appear invariantly, under the
- // mutable ref.
- current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
}
-pub struct DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
+pub struct DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
operation: Operation,
- // 'span and 'history cannot be merged, since when we call `unbuild` we need
- // to return the exact 'span that was used when calling `build`.
- current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
history: &'history mut AllocHistory,
offset: Size,
}
-impl<'span, 'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
+impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
pub fn build<'history>(
self,
history: &'history mut AllocHistory,
offset: Size,
- ) -> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
- DiagnosticCx {
- operation: self.operation,
- current_span: self.current_span,
- threads: self.threads,
- history,
- offset,
- }
+ ) -> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+ DiagnosticCx { operation: self.operation, machine: self.machine, history, offset }
}
pub fn retag(
- current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
cause: RetagCause,
new_tag: SbTag,
orig_tag: ProvenanceExtra,
@@ -154,46 +140,36 @@ pub fn retag(
let operation =
Operation::Retag(RetagOp { cause, new_tag, orig_tag, range, permission: None });
- DiagnosticCxBuilder { current_span, threads, operation }
+ DiagnosticCxBuilder { machine, operation }
}
pub fn read(
- current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
tag: ProvenanceExtra,
range: AllocRange,
) -> Self {
let operation = Operation::Access(AccessOp { kind: AccessKind::Read, tag, range });
- DiagnosticCxBuilder { current_span, threads, operation }
+ DiagnosticCxBuilder { machine, operation }
}
pub fn write(
- current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
tag: ProvenanceExtra,
range: AllocRange,
) -> Self {
let operation = Operation::Access(AccessOp { kind: AccessKind::Write, tag, range });
- DiagnosticCxBuilder { current_span, threads, operation }
+ DiagnosticCxBuilder { machine, operation }
}
- pub fn dealloc(
- current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
- tag: ProvenanceExtra,
- ) -> Self {
+ pub fn dealloc(machine: &'ecx MiriMachine<'mir, 'tcx>, tag: ProvenanceExtra) -> Self {
let operation = Operation::Dealloc(DeallocOp { tag });
- DiagnosticCxBuilder { current_span, threads, operation }
+ DiagnosticCxBuilder { machine, operation }
}
}
-impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
- pub fn unbuild(self) -> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
- DiagnosticCxBuilder {
- operation: self.operation,
- current_span: self.current_span,
- threads: self.threads,
- }
+impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+ pub fn unbuild(self) -> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
+ DiagnosticCxBuilder { machine: self.machine, operation: self.operation }
}
}
@@ -234,10 +210,10 @@ struct DeallocOp {
}
impl AllocHistory {
- pub fn new(id: AllocId, item: Item, current_span: &mut CurrentSpan<'_, '_, '_>) -> Self {
+ pub fn new(id: AllocId, item: Item, machine: &MiriMachine<'_, '_>) -> Self {
Self {
id,
- base: (item, current_span.get()),
+ base: (item, machine.current_span()),
creations: SmallVec::new(),
invalidations: SmallVec::new(),
protectors: SmallVec::new(),
@@ -245,7 +221,7 @@ pub fn new(id: AllocId, item: Item, current_span: &mut CurrentSpan<'_, '_, '_>)
}
}
-impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
+impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
pub fn start_grant(&mut self, perm: Permission) {
let Operation::Retag(op) = &mut self.operation else {
unreachable!("start_grant must only be called during a retag, this is: {:?}", self.operation)
@@ -274,15 +250,17 @@ pub fn log_creation(&mut self) {
let Operation::Retag(op) = &self.operation else {
unreachable!("log_creation must only be called during a retag")
};
- self.history.creations.push(Creation { retag: op.clone(), span: self.current_span.get() });
+ self.history
+ .creations
+ .push(Creation { retag: op.clone(), span: self.machine.current_span() });
}
pub fn log_invalidation(&mut self, tag: SbTag) {
- let mut span = self.current_span.get();
+ let mut span = self.machine.current_span();
let (range, cause) = match &self.operation {
Operation::Retag(RetagOp { cause, range, permission, .. }) => {
if *cause == RetagCause::FnEntry {
- span = self.current_span.get_caller();
+ span = self.machine.caller_span();
}
(*range, InvalidationCause::Retag(permission.unwrap(), *cause))
}
@@ -301,7 +279,9 @@ pub fn log_protector(&mut self) {
let Operation::Retag(op) = &self.operation else {
unreachable!("Protectors can only be created during a retag")
};
- self.history.protectors.push(Protection { tag: op.new_tag, span: self.current_span.get() });
+ self.history
+ .protectors
+ .push(Protection { tag: op.new_tag, span: self.machine.current_span() });
}
pub fn get_logs_relevant_to(
@@ -418,6 +398,7 @@ pub(super) fn protector_error(&self, item: &Item, kind: ProtectorKind) -> Interp
ProtectorKind::StrongProtector => "strongly protected",
};
let call_id = self
+ .machine
.threads
.all_stacks()
.flatten()
@@ -482,9 +463,7 @@ pub fn check_tracked_tag_popped(&self, item: &Item, global: &GlobalStateInner) {
Some((orig_tag, kind))
}
};
- self.current_span
- .machine()
- .emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
+ self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
}
}
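Note: replacing the `&mut CurrentSpan` plus `&ThreadManager` pair with a single `&MiriMachine` is what lets the `'span` lifetime disappear from `DiagnosticCxBuilder`/`DiagnosticCx` while keeping the `build`/`unbuild` round-trip. A self-contained sketch of that shape; `Machine` and the string spans here are stand-ins, not Miri's types.

```rust
struct Machine {
    span: &'static str,
}

impl Machine {
    fn current_span(&self) -> &'static str {
        self.span
    }
}

/// Only one borrowed field remains, so only one lifetime parameter is needed.
struct DiagnosticCxBuilder<'ecx> {
    machine: &'ecx Machine,
}

struct DiagnosticCx<'history, 'ecx> {
    machine: &'ecx Machine,
    history: &'history mut Vec<&'static str>,
}

impl<'ecx> DiagnosticCxBuilder<'ecx> {
    fn build<'history>(
        self,
        history: &'history mut Vec<&'static str>,
    ) -> DiagnosticCx<'history, 'ecx> {
        DiagnosticCx { machine: self.machine, history }
    }
}

impl<'history, 'ecx> DiagnosticCx<'history, 'ecx> {
    fn log_creation(&mut self) {
        self.history.push(self.machine.current_span());
    }

    /// Hand back the builder, dropping the `'history` borrow.
    fn unbuild(self) -> DiagnosticCxBuilder<'ecx> {
        DiagnosticCxBuilder { machine: self.machine }
    }
}

fn main() {
    let machine = Machine { span: "main.rs:1:1" };
    let mut history = Vec::new();
    let mut dcx = DiagnosticCxBuilder { machine: &machine }.build(&mut history);
    dcx.log_creation();
    let _builder = dcx.unbuild();
    assert_eq!(history, ["main.rs:1:1"]);
}
```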
diff --git a/src/tools/miri/src/stacked_borrows/mod.rs b/src/tools/miri/src/stacked_borrows/mod.rs
index 9fdd1562c19..db4e19f9118 100644
--- a/src/tools/miri/src/stacked_borrows/mod.rs
+++ b/src/tools/miri/src/stacked_borrows/mod.rs
@@ -340,7 +340,7 @@ fn find_first_write_incompatible(&self, granting: usize) -> usize {
fn item_invalidated(
item: &Item,
global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
cause: ItemInvalidationCause,
) -> InterpResult<'tcx> {
if !global.tracked_pointer_tags.is_empty() {
@@ -385,7 +385,7 @@ fn access(
access: AccessKind,
tag: ProvenanceExtra,
global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
exposed_tags: &FxHashSet<SbTag>,
) -> InterpResult<'tcx> {
// Two main steps: Find granting item, remove incompatible items above.
@@ -471,7 +471,7 @@ fn dealloc(
&mut self,
tag: ProvenanceExtra,
global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
exposed_tags: &FxHashSet<SbTag>,
) -> InterpResult<'tcx> {
// Step 1: Make a write access.
@@ -499,7 +499,7 @@ fn grant(
derived_from: ProvenanceExtra,
new: Item,
global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
exposed_tags: &FxHashSet<SbTag>,
) -> InterpResult<'tcx> {
dcx.start_grant(new.perm());
@@ -590,14 +590,14 @@ fn new(
perm: Permission,
tag: SbTag,
id: AllocId,
- current_span: &mut CurrentSpan<'_, '_, '_>,
+ machine: &MiriMachine<'_, '_>,
) -> Self {
let item = Item::new(tag, perm, false);
let stack = Stack::new(item);
Stacks {
stacks: RangeMap::new(size, stack),
- history: AllocHistory::new(id, item, current_span),
+ history: AllocHistory::new(id, item, machine),
exposed_tags: FxHashSet::default(),
modified_since_last_gc: false,
}
@@ -607,10 +607,10 @@ fn new(
fn for_each(
&mut self,
range: AllocRange,
- mut dcx_builder: DiagnosticCxBuilder<'_, '_, '_, 'tcx>,
+ mut dcx_builder: DiagnosticCxBuilder<'_, '_, 'tcx>,
mut f: impl FnMut(
&mut Stack,
- &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+ &mut DiagnosticCx<'_, '_, '_, 'tcx>,
&mut FxHashSet<SbTag>,
) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
@@ -631,7 +631,7 @@ pub fn new_allocation(
size: Size,
state: &GlobalState,
kind: MemoryKind<MiriMemoryKind>,
- mut current_span: CurrentSpan<'_, '_, '_>,
+ machine: &MiriMachine<'_, '_>,
) -> Self {
let mut extra = state.borrow_mut();
let (base_tag, perm) = match kind {
@@ -640,12 +640,11 @@ pub fn new_allocation(
// not through a pointer). That is, whenever we directly write to a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers.
- MemoryKind::Stack =>
- (extra.base_ptr_tag(id, current_span.machine()), Permission::Unique),
+ MemoryKind::Stack => (extra.base_ptr_tag(id, machine), Permission::Unique),
// Everything else is shared by default.
- _ => (extra.base_ptr_tag(id, current_span.machine()), Permission::SharedReadWrite),
+ _ => (extra.base_ptr_tag(id, machine), Permission::SharedReadWrite),
};
- Stacks::new(size, perm, base_tag, id, &mut current_span)
+ Stacks::new(size, perm, base_tag, id, machine)
}
#[inline(always)]
@@ -655,8 +654,7 @@ pub fn before_memory_read<'tcx, 'mir, 'ecx>(
tag: ProvenanceExtra,
range: AllocRange,
state: &GlobalState,
- mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
) -> InterpResult<'tcx>
where
'tcx: 'ecx,
@@ -667,7 +665,7 @@ pub fn before_memory_read<'tcx, 'mir, 'ecx>(
Pointer::new(alloc_id, range.start),
range.size.bytes()
);
- let dcx = DiagnosticCxBuilder::read(&mut current_span, threads, tag, range);
+ let dcx = DiagnosticCxBuilder::read(machine, tag, range);
let state = state.borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
@@ -681,8 +679,7 @@ pub fn before_memory_write<'tcx, 'mir, 'ecx>(
tag: ProvenanceExtra,
range: AllocRange,
state: &GlobalState,
- mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
) -> InterpResult<'tcx> {
trace!(
"write access with tag {:?}: {:?}, size {}",
@@ -690,7 +687,7 @@ pub fn before_memory_write<'tcx, 'mir, 'ecx>(
Pointer::new(alloc_id, range.start),
range.size.bytes()
);
- let dcx = DiagnosticCxBuilder::write(&mut current_span, threads, tag, range);
+ let dcx = DiagnosticCxBuilder::write(machine, tag, range);
let state = state.borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
@@ -704,11 +701,10 @@ pub fn before_memory_deallocation<'tcx, 'mir, 'ecx>(
tag: ProvenanceExtra,
range: AllocRange,
state: &GlobalState,
- mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
- threads: &'ecx ThreadManager<'mir, 'tcx>,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
- let dcx = DiagnosticCxBuilder::dealloc(&mut current_span, threads, tag);
+ let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
let state = state.borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.dealloc(tag, &state, dcx, exposed_tags)
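Note: after this change, every Stacked Borrows memory hook takes one shared machine reference and asks it for the span on demand, instead of carrying a mutable `CurrentSpan` and a thread manager. A hedged sketch of that hook shape with invented stand-ins (`Machine`, `AllocRange`, and `before_memory_read` here are not Miri's real definitions):

```rust
struct Machine {
    spans: Vec<&'static str>,
}

impl Machine {
    /// Cheap because of the cached top-user-relevant-frame index introduced above.
    fn current_span(&self) -> &'static str {
        self.spans.last().copied().unwrap_or("<dummy>")
    }
}

struct AllocRange {
    start: u64,
    size: u64,
}

fn before_memory_read(machine: &Machine, range: AllocRange) -> String {
    // No separate span handle or thread manager needs to be threaded through:
    // everything the diagnostics need comes from `machine`.
    format!(
        "read of {}..{} at {}",
        range.start,
        range.start + range.size,
        machine.current_span()
    )
}

fn main() {
    let machine = Machine { spans: vec!["vec.rs:42:9"] };
    println!("{}", before_memory_read(&machine, AllocRange { start: 0, size: 4 }));
}
```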
@@ -773,7 +769,6 @@ fn reborrow(
let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
match alloc_kind {
AllocKind::LiveData => {
- let current_span = &mut this.machine.current_span();
// This should have alloc_extra data, but `get_alloc_extra` can still fail
// if converting this alloc_id from a global to a local one
// uncovers a non-supported `extern static`.
@@ -783,12 +778,10 @@ fn reborrow(
.as_ref()
.expect("we should have Stacked Borrows data")
.borrow_mut();
- let threads = &this.machine.threads;
// Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
// FIXME: can this be done cleaner?
let dcx = DiagnosticCxBuilder::retag(
- current_span,
- threads,
+ &this.machine,
retag_cause,
new_tag,
orig_tag,
@@ -895,8 +888,6 @@ fn reborrow(
.as_ref()
.expect("we should have Stacked Borrows data")
.borrow_mut();
- // FIXME: can't share this with the current_span inside log_creation
- let mut current_span = this.machine.current_span();
this.visit_freeze_sensitive(place, size, |mut range, frozen| {
// Adjust range.
range.start += base_offset;
@@ -916,8 +907,7 @@ fn reborrow(
let item = Item::new(new_tag, perm, protected);
let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
let dcx = DiagnosticCxBuilder::retag(
- &mut current_span, // FIXME avoid this `clone`
- &this.machine.threads,
+ &this.machine,
retag_cause,
new_tag,
orig_tag,
@@ -943,11 +933,8 @@ fn reborrow(
let item = Item::new(new_tag, perm, protect.is_some());
let range = alloc_range(base_offset, size);
let global = machine.stacked_borrows.as_ref().unwrap().borrow();
- // FIXME: can't share this with the current_span inside log_creation
- let current_span = &mut machine.current_span();
let dcx = DiagnosticCxBuilder::retag(
- current_span,
- &machine.threads,
+ machine,
retag_cause,
new_tag,
orig_tag,