Auto merge of #2867 - Vanille-N:tb-diags, r=RalfJung

Simplify event selection in TB diagnostics

As discussed previously, getting the range from `RangeMap` makes the filtering of events much simpler, with no user-visible difference.
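
For illustration only (a minimal sketch with simplified stand-in types, not Miri's actual `Event`/`History`): once each event stores the `Range<u64>` handed out by `RangeMap`, selecting the relevant history becomes a plain containment filter instead of the old transition-decomposition loop.

```rust
use std::ops::Range;

#[derive(Clone)]
struct Event {
    transition_range: Range<u64>, // sub-range of the access this event applies to
    what: &'static str,           // stand-in for the real transition/span data
}

/// Keep only the events whose recorded range contains the faulting offset.
fn extract_relevant(events: &[Event], error_offset: u64) -> Vec<Event> {
    events
        .iter()
        .filter(|e| e.transition_range.contains(&error_offset))
        .cloned()
        .collect()
}

fn main() {
    let history = vec![
        Event { transition_range: 0..4, what: "activation" },
        Event { transition_range: 4..8, what: "freeze" },
    ];
    // Only the first event covers offset 2, so only it ends up in the report.
    let relevant = extract_relevant(&history, 2);
    assert_eq!(relevant.len(), 1);
    assert_eq!(relevant[0].what, "activation");
}
```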

See the minor exception in [<9d8fc00>](9d8fc00a4a) and decide how to resolve it:
- add a boolean flag so that events produced by deallocations are not recorded?
- add a `help: deallocation counts as an implicit write`? (Note: this could be generalized to also include `help: reborrow counts as an implicit read`.)
- not bother and keep it as-is?
- something else?
Merged by bors on 2023-05-06 13:00:33 +00:00 as commit dd9df18792.
6 changed files with 109 additions and 94 deletions


@ -459,7 +459,7 @@ fn grant(
impl Stacks {
pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
if self.modified_since_last_gc {
for stack in self.stacks.iter_mut_all() {
for (_stack_range, stack) in self.stacks.iter_mut_all() {
if stack.len() > 64 {
stack.retain(live_tags);
}
@ -511,8 +511,8 @@ fn for_each(
) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
self.modified_since_last_gc = true;
for (offset, stack) in self.stacks.iter_mut(range.start, range.size) {
let mut dcx = dcx_builder.build(&mut self.history, offset);
for (stack_range, stack) in self.stacks.iter_mut(range.start, range.size) {
let mut dcx = dcx_builder.build(&mut self.history, Size::from_bytes(stack_range.start));
f(stack, &mut dcx, &mut self.exposed_tags)?;
dcx_builder = dcx.unbuild();
}


@ -3,7 +3,6 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_span::{Span, SpanData};
use rustc_target::abi::Size;
use crate::borrow_tracker::tree_borrows::{
perms::{PermTransition, Permission},
@ -14,18 +13,30 @@
use crate::*;
/// Complete data for an event:
/// - `kind` is what happened to the permissions
/// - `access_kind` and `access_range` describe the access that caused the event
/// - `offset` allows filtering only the relevant events for a given memory location
/// (see how we perform the filtering in `History::extract_relevant`.
/// - `span` is the line of code in question
#[derive(Clone, Debug)]
pub struct Event {
/// Transformation of permissions that occurred because of this event
pub transition: PermTransition,
/// Kind of the access that triggered this event
pub access_kind: AccessKind,
/// Relative position of the tag to the one used for the access
pub is_foreign: bool,
/// User-visible range of the access
pub access_range: AllocRange,
pub offset: Size,
/// The transition recorded by this event only occurred on a subrange of
/// `access_range`: a single access on `access_range` triggers several events,
/// each with their own mutually disjoint `transition_range`. No-op transitions
/// should not be recorded as events, so the union of all `transition_range` is not
/// necessarily the entire `access_range`.
///
/// No data from any `transition_range` should ever be user-visible, because
/// both the start and end of `transition_range` are entirely dependent on the
/// internal representation of `RangeMap` which is supposed to be opaque.
/// What will be shown in the error message is the first byte `error_offset` of
/// the `TbError`, which should satisfy
/// `event.transition_range.contains(error.error_offset)`.
pub transition_range: Range<u64>,
/// Line of code that triggered this event
pub span: Span,
}
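
To make the invariant described in the doc comment above concrete, here is a hypothetical illustration (made-up offsets and a simplified stand-in struct, not Miri's code): a single access can produce several events with mutually disjoint `transition_range`s, and sub-ranges where the transition was a no-op record no event at all, so their union need not cover the whole `access_range`.

```rust
use std::ops::Range;

struct Event {
    access_range: Range<u64>,     // the whole user-visible access
    transition_range: Range<u64>, // the sub-range this particular transition covered
}

fn main() {
    let access: Range<u64> = 0..8;
    // Suppose bytes 4..6 already had the target permission: that sub-range
    // underwent a no-op transition and therefore recorded no event.
    let events = [
        Event { access_range: access.clone(), transition_range: 0..4 },
        Event { access_range: access.clone(), transition_range: 6..8 },
    ];
    let covered: u64 =
        events.iter().map(|e| e.transition_range.end - e.transition_range.start).sum();
    let total = events[0].access_range.end - events[0].access_range.start;
    assert!(covered < total); // the union does not cover the whole access
}
```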
@ -35,9 +46,9 @@ pub struct Event {
/// Available filtering methods include `History::forget` and `History::extract_relevant`.
#[derive(Clone, Debug)]
pub struct History {
pub tag: BorTag,
pub created: (Span, Permission),
pub events: Vec<Event>,
tag: BorTag,
created: (Span, Permission),
events: Vec<Event>,
}
/// History formatted for use by `src/diagnostics.rs`.
@ -60,12 +71,7 @@ impl HistoryData {
// Format events from `new_history` into those recorded by `self`.
//
// NOTE: also converts `Span` to `SpanData`.
pub fn extend(
&mut self,
new_history: History,
tag_name: &'static str,
show_initial_state: bool,
) {
fn extend(&mut self, new_history: History, tag_name: &'static str, show_initial_state: bool) {
let History { tag, created, events } = new_history;
let this = format!("the {tag_name} tag {tag:?}");
let msg_initial_state = format!(", in the initial state {}", created.1);
@ -75,9 +81,16 @@ pub fn extend(
);
self.events.push((Some(created.0.data()), msg_creation));
for &Event { transition, access_kind, is_foreign, access_range, span, offset: _ } in &events
for &Event {
transition,
access_kind,
is_foreign,
access_range,
span,
transition_range: _,
} in &events
{
// NOTE: `offset` is explicitly absent from the error message, it has no significance
// NOTE: `transition_range` is explicitly absent from the error message, it has no significance
// to the user. The meaningful one is `access_range`.
self.events.push((Some(span.data()), format!("{this} then transitioned {transition} due to a {rel} {access_kind} at offsets {access_range:?}", rel = if is_foreign { "foreign" } else { "child" })));
self.events.push((None, format!("this corresponds to {}", transition.summary())));
@ -197,44 +210,19 @@ fn forget(&self) -> Self {
History { events: Vec::new(), created: self.created, tag: self.tag }
}
/// Reconstruct the history relevant to `error_offset` knowing that
/// its permission followed `complete_transition`.
///
/// Here's how we do this:
/// - we know `full := complete_transition` the transition of the permission from
/// its initialization to the state just before the error was caused,
/// we want to find a chain of events that produces `full`
/// - we decompose `full` into `pre o post` where
/// `pre` is the best applicable transition from recorded events
/// - we select the event that caused `pre` and iterate
/// to find the chain of events that produces `full := post`
///
/// To find the "best applicable transition" for full:
/// - eliminate events that cannot be applied because their offset is too big
/// - eliminate events that cannot be applied because their starting point is wrong
/// - select the one that happened closest to the range of interest
fn extract_relevant(&self, complete_transition: PermTransition, error_offset: Size) -> Self {
let mut selected_events: Vec<Event> = Vec::new();
let mut full = complete_transition;
while !full.is_noop() {
let (pre, post) = self
/// Reconstruct the history relevant to `error_offset` by filtering
/// only events whose range contains the offset we are interested in.
fn extract_relevant(&self, error_offset: u64) -> Self {
History {
events: self
.events
.iter()
.filter(|e| e.offset <= error_offset)
.filter_map(|pre_canditate| {
full.apply_start(pre_canditate.transition)
.map(|post_canditate| (pre_canditate, post_canditate))
})
.max_by_key(|(pre_canditate, _post_candidate)| pre_canditate.offset)
.unwrap();
// If this occurs we will loop infinitely !
// Make sure to only put non-noop transitions in `History`.
assert!(!pre.transition.is_noop());
full = post;
selected_events.push(pre.clone());
.filter(|e| e.transition_range.contains(&error_offset))
.cloned()
.collect::<Vec<_>>(),
created: self.created,
tag: self.tag,
}
History { events: selected_events, created: self.created, tag: self.tag }
}
}
@ -242,8 +230,8 @@ fn extract_relevant(&self, complete_transition: PermTransition, error_offset: Si
pub(super) struct TbError<'node> {
/// What failure occurred.
pub error_kind: TransitionError,
/// The byte at which the conflict occured.
pub error_offset: Size,
/// The offset (into the allocation) at which the conflict occurred.
pub error_offset: u64,
/// The tag on which the error was triggered.
/// On protector violations, this is the tag that was protected.
/// On accesses rejected due to insufficient permissions, this is the
@ -261,12 +249,11 @@ impl TbError<'_> {
/// Produce a UB error.
pub fn build<'tcx>(self) -> InterpError<'tcx> {
use TransitionError::*;
let started_as = self.conflicting_info.history.created.1;
let kind = self.access_kind;
let accessed = self.accessed_info;
let conflicting = self.conflicting_info;
let accessed_is_conflicting = accessed.tag == conflicting.tag;
let (pre_error, title, details, conflicting_tag_name) = match self.error_kind {
let (title, details, conflicting_tag_name) = match self.error_kind {
ChildAccessForbidden(perm) => {
let conflicting_tag_name =
if accessed_is_conflicting { "accessed" } else { "conflicting" };
@ -280,7 +267,7 @@ pub fn build<'tcx>(self) -> InterpError<'tcx> {
details.push(format!(
"the {conflicting_tag_name} tag {conflicting} has state {perm} which forbids child {kind}es"
));
(perm, title, details, conflicting_tag_name)
(title, details, conflicting_tag_name)
}
ProtectedTransition(transition) => {
let conflicting_tag_name = "protected";
@ -297,7 +284,7 @@ pub fn build<'tcx>(self) -> InterpError<'tcx> {
loss = transition.summary(),
),
];
(transition.started(), title, details, conflicting_tag_name)
(title, details, conflicting_tag_name)
}
ProtectedDealloc => {
let conflicting_tag_name = "strongly protected";
@ -308,16 +295,15 @@ pub fn build<'tcx>(self) -> InterpError<'tcx> {
),
format!("the {conflicting_tag_name} tag {conflicting} disallows deallocations"),
];
(started_as, title, details, conflicting_tag_name)
(title, details, conflicting_tag_name)
}
};
let pre_transition = PermTransition::from(started_as, pre_error).unwrap();
let mut history = HistoryData::default();
if !accessed_is_conflicting {
history.extend(self.accessed_info.history.forget(), "accessed", false);
}
history.extend(
self.conflicting_info.history.extract_relevant(pre_transition, self.error_offset),
self.conflicting_info.history.extract_relevant(self.error_offset),
conflicting_tag_name,
true,
);


@ -311,7 +311,7 @@ pub fn new_child(
parent_tag: BorTag,
new_tag: BorTag,
default_initial_perm: Permission,
range: AllocRange,
reborrow_range: AllocRange,
span: Span,
) -> InterpResult<'tcx> {
assert!(!self.tag_mapping.contains_key(&new_tag));
@ -332,7 +332,8 @@ pub fn new_child(
self.nodes.get_mut(parent_idx).unwrap().children.push(idx);
// Initialize perms
let perm = LocationState::new(default_initial_perm).with_access();
for (_range, perms) in self.rperms.iter_mut(range.start, range.size) {
for (_perms_range, perms) in self.rperms.iter_mut(reborrow_range.start, reborrow_range.size)
{
perms.insert(idx, perm);
}
Ok(())
@ -344,12 +345,12 @@ pub fn new_child(
pub fn dealloc(
&mut self,
tag: BorTag,
range: AllocRange,
access_range: AllocRange,
global: &GlobalState,
span: Span, // diagnostics
) -> InterpResult<'tcx> {
self.perform_access(AccessKind::Write, tag, range, global, span)?;
for (offset, perms) in self.rperms.iter_mut(range.start, range.size) {
self.perform_access(AccessKind::Write, tag, access_range, global, span)?;
for (perms_range, perms) in self.rperms.iter_mut(access_range.start, access_range.size) {
TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms }
.traverse_parents_this_children_others(
tag,
@ -368,7 +369,7 @@ pub fn dealloc(
TbError {
conflicting_info,
access_kind: AccessKind::Write,
error_offset: offset,
error_offset: perms_range.start,
error_kind,
accessed_info,
}
@ -388,11 +389,11 @@ pub fn perform_access(
&mut self,
access_kind: AccessKind,
tag: BorTag,
range: AllocRange,
access_range: AllocRange,
global: &GlobalState,
span: Span, // diagnostics
) -> InterpResult<'tcx> {
for (offset, perms) in self.rperms.iter_mut(range.start, range.size) {
for (perms_range, perms) in self.rperms.iter_mut(access_range.start, access_range.size) {
TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms }
.traverse_parents_this_children_others(
tag,
@ -456,9 +457,9 @@ pub fn perform_access(
node.debug_info.history.push(diagnostics::Event {
transition,
access_kind,
access_range: range,
is_foreign: rel_pos.is_foreign(),
offset,
access_range,
transition_range: perms_range.clone(),
span,
});
old_state.permission =
@ -472,7 +473,7 @@ pub fn perform_access(
TbError {
conflicting_info,
access_kind,
error_offset: offset,
error_offset: perms_range.start,
error_kind,
accessed_info,
}
@ -530,7 +531,7 @@ fn keep_only_needed(&mut self, idx: UniIndex, live: &FxHashSet<BorTag>) -> bool
// the tag from the mapping.
let tag = node.tag;
self.nodes.remove(idx);
for perms in self.rperms.iter_mut_all() {
for (_perms_range, perms) in self.rperms.iter_mut_all() {
perms.remove(idx);
}
self.tag_mapping.remove(&tag);


@ -868,7 +868,7 @@ pub(super) fn race_free_with_atomic(
pub fn read<'tcx>(
&self,
alloc_id: AllocId,
range: AllocRange,
access_range: AllocRange,
machine: &MiriMachine<'_, '_>,
) -> InterpResult<'tcx> {
let current_span = machine.current_span();
@ -876,7 +876,9 @@ pub fn read<'tcx>(
if global.race_detecting() {
let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
for (offset, mem_clocks) in alloc_ranges.iter_mut(range.start, range.size) {
for (mem_clocks_range, mem_clocks) in
alloc_ranges.iter_mut(access_range.start, access_range.size)
{
if let Err(DataRace) =
mem_clocks.read_race_detect(&mut thread_clocks, index, current_span)
{
@ -888,7 +890,7 @@ pub fn read<'tcx>(
mem_clocks,
"Read",
false,
Pointer::new(alloc_id, offset),
Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
);
}
}
@ -902,7 +904,7 @@ pub fn read<'tcx>(
fn unique_access<'tcx>(
&mut self,
alloc_id: AllocId,
range: AllocRange,
access_range: AllocRange,
write_type: WriteType,
machine: &mut MiriMachine<'_, '_>,
) -> InterpResult<'tcx> {
@ -910,8 +912,8 @@ fn unique_access<'tcx>(
let global = machine.data_race.as_mut().unwrap();
if global.race_detecting() {
let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
for (offset, mem_clocks) in
self.alloc_ranges.get_mut().iter_mut(range.start, range.size)
for (mem_clocks_range, mem_clocks) in
self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
{
if let Err(DataRace) = mem_clocks.write_race_detect(
&mut thread_clocks,
@ -927,7 +929,7 @@ fn unique_access<'tcx>(
mem_clocks,
write_type.get_descriptor(),
false,
Pointer::new(alloc_id, offset),
Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
);
}
}
@ -1150,7 +1152,7 @@ fn validate_atomic_op<A: Debug + Copy>(
&this.machine.threads,
current_span,
|index, mut thread_clocks| {
for (offset, mem_clocks) in
for (mem_clocks_range, mem_clocks) in
alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
{
if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
@ -1162,7 +1164,10 @@ fn validate_atomic_op<A: Debug + Copy>(
mem_clocks,
description,
true,
Pointer::new(alloc_id, offset),
Pointer::new(
alloc_id,
Size::from_bytes(mem_clocks_range.start),
),
)
.map(|_| true);
}


@ -62,8 +62,10 @@ fn find_offset(&self, offset: u64) -> usize {
/// *not* split items if they overlap with the edges. Do not use this to mutate
/// through interior mutability.
///
/// The iterator also provides the offset of the given element.
pub fn iter(&self, offset: Size, len: Size) -> impl Iterator<Item = (Size, &T)> {
/// The iterator also provides the range of the given element.
/// How exactly the ranges are split can differ even for otherwise identical
/// maps, so user-visible behavior should never depend on the exact range.
pub fn iter(&self, offset: Size, len: Size) -> impl Iterator<Item = (ops::Range<u64>, &T)> {
let offset = offset.bytes();
let len = len.bytes();
// Compute a slice starting with the elements we care about.
@ -84,13 +86,21 @@ pub fn iter(&self, offset: Size, len: Size) -> impl Iterator<Item = (Size, &T)>
slice
.iter()
.take_while(move |elem| elem.range.start < end)
.map(|elem| (Size::from_bytes(elem.range.start), &elem.data))
.map(|elem| (elem.range.clone(), &elem.data))
}
pub fn iter_mut_all(&mut self) -> impl Iterator<Item = &mut T> {
self.v.iter_mut().map(|elem| &mut elem.data)
/// Provides mutable iteration over all elements.
/// The iterator also provides the range of the given element.
/// How exactly the ranges are split can differ even for otherwise identical
/// maps, so user-visible behavior should never depend on the exact range.
pub fn iter_mut_all(&mut self) -> impl Iterator<Item = (ops::Range<u64>, &mut T)> {
self.v.iter_mut().map(|elem| (elem.range.clone(), &mut elem.data))
}
/// Provides iteration over all elements.
/// The iterator also provides the range of the given element.
/// How exactly the ranges are split can differ even for otherwise identical
/// maps, so user-visible behavior should never depend on the exact range.
pub fn iter_all(&self) -> impl Iterator<Item = (ops::Range<u64>, &T)> {
self.v.iter().map(|elem| (elem.range.clone(), &elem.data))
}
@ -126,8 +136,15 @@ fn split_index(&mut self, index: usize, split_offset: u64) -> bool
/// to make sure that when they are mutated, the effect is constrained to the given range.
/// Moreover, this will opportunistically merge neighbouring equal blocks.
///
/// The iterator also provides the offset of the given element.
pub fn iter_mut(&mut self, offset: Size, len: Size) -> impl Iterator<Item = (Size, &mut T)>
/// The iterator also provides the range of the given element.
/// How exactly the ranges are split (both prior to and resulting from the execution of this
/// function) can differ even for otherwise identical maps,
/// so user-visible behavior should never depend on the exact range.
pub fn iter_mut(
&mut self,
offset: Size,
len: Size,
) -> impl Iterator<Item = (ops::Range<u64>, &mut T)>
where
T: Clone + PartialEq,
{
@ -208,7 +225,7 @@ pub fn iter_mut(&mut self, offset: Size, len: Size) -> impl Iterator<Item = (Siz
// Now we yield the slice. `end` is inclusive.
&mut self.v[first_idx..=end_idx]
};
slice.iter_mut().map(|elem| (Size::from_bytes(elem.range.start), &mut elem.data))
slice.iter_mut().map(|elem| (elem.range.clone(), &mut elem.data))
}
}
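
A minimal stand-in for the new iterator contract (a hypothetical simplified type, not Miri's actual `RangeMap`): callers receive each element's full `Range<u64>`, which may extend beyond the queried window, and the exact split is an implementation detail that should never become user-visible.

```rust
use std::ops::Range;

/// Sorted, non-overlapping ranges covering the whole allocation.
struct RangeMap<T> {
    v: Vec<(Range<u64>, T)>,
}

impl<T> RangeMap<T> {
    /// Yield every element overlapping `offset..offset + len`, together with
    /// its own range (which can be wider than the query).
    fn iter(&self, offset: u64, len: u64) -> impl Iterator<Item = (Range<u64>, &T)> {
        let end = offset + len;
        self.v
            .iter()
            .filter(move |(r, _)| r.start < end && offset < r.end)
            .map(|(r, data)| (r.clone(), data))
    }
}

fn main() {
    let map = RangeMap { v: vec![(0..4, 'a'), (4..16, 'b')] };
    for (range, data) in map.iter(2, 6) {
        // Prints `0..4: a` and `4..16: b` -- the ranges are not clipped to 2..8.
        println!("{range:?}: {data}");
    }
}
```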


@ -17,6 +17,12 @@ help: the strongly protected tag <TAG> was created here, in the initial state Re
|
LL | fn inner(x: &mut i32, f: fn(&mut i32)) {
| ^
help: the strongly protected tag <TAG> then transitioned from Reserved to Active due to a child write access at offsets [0x0..0x4]
--> $DIR/strongly-protected.rs:LL:CC
|
LL | drop(unsafe { Box::from_raw(raw) });
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: this corresponds to an activation
= note: BACKTRACE (of the first span):
= note: inside `std::alloc::dealloc` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `<std::alloc::Global as std::alloc::Allocator>::deallocate` at RUSTLIB/alloc/src/alloc.rs:LL:CC