Rollup merge of #118695 - Zalathar:push-refined, r=davidtwco

coverage: Merge refined spans in a separate final pass

Pulling this merge step out of `push_refined_span` into a separate final pass lets us push directly to `refined_spans` instead of going through a helper method.

Because the compiler can now see partial borrows of `refined_spans`, we can remove some extra code that was jumping through hoops to satisfy the borrow checker.

---

`@rustbot` label +A-code-coverage
This commit is contained in:
Matthias Krüger 2023-12-08 06:44:43 +01:00 committed by GitHub
commit f7c892e323
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -89,10 +89,10 @@ pub(super) fn new(
} }
} }
pub fn merge_from(&mut self, mut other: CoverageSpan) { pub fn merge_from(&mut self, other: &Self) {
debug_assert!(self.is_mergeable(&other)); debug_assert!(self.is_mergeable(other));
self.span = self.span.to(other.span); self.span = self.span.to(other.span);
self.merged_spans.append(&mut other.merged_spans); self.merged_spans.extend_from_slice(&other.merged_spans);
} }
pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) { pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
@ -267,7 +267,7 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
if curr.is_mergeable(prev) { if curr.is_mergeable(prev) {
debug!(" same bcb (and neither is a closure), merge with prev={prev:?}"); debug!(" same bcb (and neither is a closure), merge with prev={prev:?}");
let prev = self.take_prev(); let prev = self.take_prev();
self.curr_mut().merge_from(prev); self.curr_mut().merge_from(&prev);
self.maybe_push_macro_name_span(); self.maybe_push_macro_name_span();
// Note that curr.span may now differ from curr_original_span // Note that curr.span may now differ from curr_original_span
} else if prev.span.hi() <= curr.span.lo() { } else if prev.span.hi() <= curr.span.lo() {
@ -275,7 +275,7 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
" different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}", " different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
); );
let prev = self.take_prev(); let prev = self.take_prev();
self.push_refined_span(prev); self.refined_spans.push(prev);
self.maybe_push_macro_name_span(); self.maybe_push_macro_name_span();
} else if prev.is_closure { } else if prev.is_closure {
// drop any equal or overlapping span (`curr`) and keep `prev` to test again in the // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
@ -322,11 +322,10 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
let prev = self.take_prev(); let prev = self.take_prev();
debug!(" AT END, adding last prev={prev:?}"); debug!(" AT END, adding last prev={prev:?}");
// Take `pending_dups` so that we can drain it while calling self methods. // Drain any remaining dups into the output.
// It is never used as a field after this point. for dup in self.pending_dups.drain(..) {
for dup in std::mem::take(&mut self.pending_dups) {
debug!(" ...adding at least one pending dup={:?}", dup); debug!(" ...adding at least one pending dup={:?}", dup);
self.push_refined_span(dup); self.refined_spans.push(dup);
} }
// Async functions wrap a closure that implements the body to be executed. The enclosing // Async functions wrap a closure that implements the body to be executed. The enclosing
@ -343,9 +342,20 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
}; };
if !body_ends_with_closure { if !body_ends_with_closure {
self.push_refined_span(prev); self.refined_spans.push(prev);
} }
// Do one last merge pass, to simplify the output.
self.refined_spans.dedup_by(|b, a| {
if a.is_mergeable(b) {
debug!(?a, ?b, "merging list-adjacent refined spans");
a.merge_from(b);
true
} else {
false
}
});
// Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
// regions for the current function leave room for the closure's own coverage regions // regions for the current function leave room for the closure's own coverage regions
// (injected separately, from the closure's own MIR). // (injected separately, from the closure's own MIR).
@ -353,18 +363,6 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
self.refined_spans self.refined_spans
} }
fn push_refined_span(&mut self, covspan: CoverageSpan) {
if let Some(last) = self.refined_spans.last_mut()
&& last.is_mergeable(&covspan)
{
// Instead of pushing the new span, merge it with the last refined span.
debug!(?last, ?covspan, "merging new refined span with last refined span");
last.merge_from(covspan);
} else {
self.refined_spans.push(covspan);
}
}
/// If `curr` is part of a new macro expansion, carve out and push a separate /// If `curr` is part of a new macro expansion, carve out and push a separate
/// span that ends just after the macro name and its subsequent `!`. /// span that ends just after the macro name and its subsequent `!`.
fn maybe_push_macro_name_span(&mut self) { fn maybe_push_macro_name_span(&mut self) {
@ -397,7 +395,7 @@ fn maybe_push_macro_name_span(&mut self) {
" and curr starts a new macro expansion, so add a new span just for \ " and curr starts a new macro expansion, so add a new span just for \
the macro `{visible_macro}!`, new span={macro_name_cov:?}", the macro `{visible_macro}!`, new span={macro_name_cov:?}",
); );
self.push_refined_span(macro_name_cov); self.refined_spans.push(macro_name_cov);
} }
fn curr(&self) -> &CoverageSpan { fn curr(&self) -> &CoverageSpan {
@ -454,19 +452,14 @@ fn maybe_flush_pending_dups(&mut self) {
previous iteration, or prev started a new disjoint span" previous iteration, or prev started a new disjoint span"
); );
if last_dup.span.hi() <= self.curr().span.lo() { if last_dup.span.hi() <= self.curr().span.lo() {
// Temporarily steal `pending_dups` into a local, so that we can for dup in self.pending_dups.drain(..) {
// drain it while calling other self methods.
let mut pending_dups = std::mem::take(&mut self.pending_dups);
for dup in pending_dups.drain(..) {
debug!(" ...adding at least one pending={:?}", dup); debug!(" ...adding at least one pending={:?}", dup);
self.push_refined_span(dup); self.refined_spans.push(dup);
} }
// The list of dups is now empty, but we can recycle its capacity.
assert!(pending_dups.is_empty() && self.pending_dups.is_empty());
self.pending_dups = pending_dups;
} else { } else {
self.pending_dups.clear(); self.pending_dups.clear();
} }
assert!(self.pending_dups.is_empty());
} }
/// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order. /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
@ -513,22 +506,18 @@ fn carve_out_span_for_closure(&mut self) {
let has_pre_closure_span = prev.span.lo() < right_cutoff; let has_pre_closure_span = prev.span.lo() < right_cutoff;
let has_post_closure_span = prev.span.hi() > right_cutoff; let has_post_closure_span = prev.span.hi() > right_cutoff;
// Temporarily steal `pending_dups` into a local, so that we can
// mutate and/or drain it while calling other self methods.
let mut pending_dups = std::mem::take(&mut self.pending_dups);
if has_pre_closure_span { if has_pre_closure_span {
let mut pre_closure = self.prev().clone(); let mut pre_closure = self.prev().clone();
pre_closure.span = pre_closure.span.with_hi(left_cutoff); pre_closure.span = pre_closure.span.with_hi(left_cutoff);
debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure); debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
if !pending_dups.is_empty() {
for mut dup in pending_dups.iter().cloned() { for mut dup in self.pending_dups.iter().cloned() {
dup.span = dup.span.with_hi(left_cutoff); dup.span = dup.span.with_hi(left_cutoff);
debug!(" ...and at least one pre_closure dup={:?}", dup); debug!(" ...and at least one pre_closure dup={:?}", dup);
self.push_refined_span(dup); self.refined_spans.push(dup);
}
} }
self.push_refined_span(pre_closure);
self.refined_spans.push(pre_closure);
} }
if has_post_closure_span { if has_post_closure_span {
@ -537,19 +526,17 @@ fn carve_out_span_for_closure(&mut self) {
// about how the `CoverageSpan`s are ordered.) // about how the `CoverageSpan`s are ordered.)
self.prev_mut().span = self.prev().span.with_lo(right_cutoff); self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
debug!(" Mutated prev.span to start after the closure. prev={:?}", self.prev()); debug!(" Mutated prev.span to start after the closure. prev={:?}", self.prev());
for dup in pending_dups.iter_mut() {
for dup in &mut self.pending_dups {
debug!(" ...and at least one overlapping dup={:?}", dup); debug!(" ...and at least one overlapping dup={:?}", dup);
dup.span = dup.span.with_lo(right_cutoff); dup.span = dup.span.with_lo(right_cutoff);
} }
let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
self.push_refined_span(closure_covspan); // since self.prev() was already updated
} else {
pending_dups.clear();
}
// Restore the modified post-closure spans, or the empty vector's capacity. let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
assert!(self.pending_dups.is_empty()); self.refined_spans.push(closure_covspan); // since self.prev() was already updated
self.pending_dups = pending_dups; } else {
self.pending_dups.clear();
}
} }
/// Called if `curr.span` equals `prev_original_span` (and potentially equal to all /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
@ -645,7 +632,7 @@ fn cutoff_prev_at_overlapping_curr(&mut self) {
} else { } else {
debug!(" ... adding modified prev={:?}", self.prev()); debug!(" ... adding modified prev={:?}", self.prev());
let prev = self.take_prev(); let prev = self.take_prev();
self.push_refined_span(prev); self.refined_spans.push(prev);
} }
} else { } else {
// with `pending_dups`, `prev` cannot have any statements that don't overlap // with `pending_dups`, `prev` cannot have any statements that don't overlap