diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 6c0c62a1b76..576f098e6e4 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -251,32 +251,9 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
             } else if curr.is_closure {
                 self.carve_out_span_for_closure();
             } else if self.prev_original_span == curr.span {
-                // Note that this compares the new (`curr`) span to `prev_original_span`.
-                // In this branch, the actual span byte range of `prev_original_span` is not
-                // important. What is important is knowing whether the new `curr` span was
-                // **originally** the same as the original span of `prev()`. The original spans
-                // reflect their original sort order, and for equal spans, conveys a partial
-                // ordering based on CFG dominator priority.
-                if prev.visible_macro.is_some() && curr.visible_macro.is_some() {
-                    // Macros that expand to include branching (such as
-                    // `assert_eq!()`, `assert_ne!()`, `info!()`, `debug!()`, or
-                    // `trace!()`) typically generate callee spans with identical
-                    // ranges (typically the full span of the macro) for all
-                    // `BasicBlocks`. This makes it impossible to distinguish
-                    // the condition (`if val1 != val2`) from the optional
-                    // branched statements (such as the call to `panic!()` on
-                    // assert failure). In this case it is better (or less
-                    // worse) to drop the optional branch bcbs and keep the
-                    // non-conditional statements, to count when reached.
-                    debug!(
-                        "  curr and prev are part of a macro expansion, and curr has the same span \
-                        as prev, but is in a different bcb. Drop curr and keep prev for next iter. \
-                        prev={prev:?}",
-                    );
-                    self.take_curr(); // Discards curr.
-                } else {
-                    self.update_pending_dups();
-                }
+                // `prev` and `curr` have the same span, or would have had the
+                // same span before `prev` was modified by other spans.
+                self.update_pending_dups();
             } else {
                 self.cutoff_prev_at_overlapping_curr();
                 self.maybe_push_macro_name_span();
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index fde25ad994f..b28abcf71d5 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -1,4 +1,5 @@
 use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir::{
     self, AggregateKind, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
     TerminatorKind,
@@ -35,6 +36,9 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
         }
     }
 
+    initial_spans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
+    remove_unwanted_macro_spans(&mut initial_spans);
+
     initial_spans.sort_by(|a, b| {
         // First sort by span start.
         Ord::cmp(&a.span.lo(), &b.span.lo())
@@ -55,6 +59,26 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
     initial_spans
 }
 
+/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
+/// multiple condition/consequent blocks that have the span of the whole macro
+/// invocation, which is unhelpful. Keeping only the first such span seems to
+/// give better mappings, so remove the others.
+///
+/// (The input spans should be sorted in BCB dominator order, so that the
+/// retained "first" span is likely to dominate the others.)
+fn remove_unwanted_macro_spans(initial_spans: &mut Vec<CoverageSpan>) {
+    let mut seen_macro_spans = FxHashSet::default();
+    initial_spans.retain(|covspan| {
+        // Ignore (retain) closure spans and non-macro-expansion spans.
+        if covspan.is_closure || covspan.visible_macro.is_none() {
+            return true;
+        }
+
+        // Retain only the first macro-expanded covspan with this span.
+        seen_macro_spans.insert(covspan.span)
+    });
+}
+
 // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
 // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
 // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
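The dedup in `remove_unwanted_macro_spans` hinges on `HashSet::insert` returning `true` only when the value was not already present, so `Vec::retain` keeps just the first covspan seen for each distinct macro span. Below is a minimal standalone sketch of that retain-plus-insert idiom; it uses plain `std` types and made-up `(span, is_macro)` pairs in place of the compiler's `CoverageSpan` and `FxHashSet`, purely for illustration:

```rust
use std::collections::HashSet;

fn main() {
    // Stand-ins for covspans: (span, is_macro). Already sorted so that the
    // first occurrence of each macro span is the one we want to keep.
    let mut spans: Vec<(u32, bool)> = vec![(10, true), (10, true), (20, false), (10, true)];

    let mut seen = HashSet::new();
    spans.retain(|&(span, is_macro)| {
        // Non-macro spans are always retained.
        if !is_macro {
            return true;
        }
        // `insert` returns true only the first time this span is seen,
        // so later duplicates of a macro span are dropped.
        seen.insert(span)
    });

    assert_eq!(spans, vec![(10, true), (20, false)]);
}
```

Because the real covspans are sorted into BCB dominator order immediately before this call, the retained first occurrence is the one most likely to dominate the dropped duplicates, as noted in the new doc comment.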