diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
index 1142d492160..269cb8f2380 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
@@ -684,8 +684,8 @@ pub fn reconstruct_format_args_template_string(pieces: &[FormatArgsPiece]) -> St
     for piece in pieces {
         match piece {
             FormatArgsPiece::Literal(s) => {
-                for c in s.as_str().escape_debug() {
-                    template.push(c);
+                for c in s.as_str().chars() {
+                    template.extend(c.escape_debug());
                     if let '{' | '}' = c {
                         template.push(c);
                     }
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index 97ebd42d077..34c28bce5d8 100644
--- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -314,9 +314,10 @@ fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) {
 ///     fn into_iter<'a>(&'a self) -> Self::Iter<'a>;
 /// }
 /// ```
-fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRef]) {
+fn check_gat_where_clauses(tcx: TyCtxt<'_>, trait_def_id: LocalDefId) {
     // Associates every GAT's def_id to a list of possibly missing bounds detected by this lint.
     let mut required_bounds_by_item = FxHashMap::default();
+    let associated_items = tcx.associated_items(trait_def_id);
 
     // Loop over all GATs together, because if this lint suggests adding a where-clause bound
     // to one GAT, it might then require us to an additional bound on another GAT.
@@ -325,8 +326,8 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
     // those GATs.
     loop {
         let mut should_continue = false;
-        for gat_item in associated_items {
-            let gat_def_id = gat_item.id.owner_id;
+        for gat_item in associated_items.in_definition_order() {
+            let gat_def_id = gat_item.def_id.expect_local();
             let gat_item = tcx.associated_item(gat_def_id);
             // If this item is not an assoc ty, or has no args, then it's not a GAT
             if gat_item.kind != ty::AssocKind::Type {
@@ -342,8 +343,8 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
             // This is calculated by taking the intersection of the bounds that each item
             // constrains the GAT with individually.
             let mut new_required_bounds: Option<FxHashSet<ty::Clause<'_>>> = None;
-            for item in associated_items {
-                let item_def_id = item.id.owner_id;
+            for item in associated_items.in_definition_order() {
+                let item_def_id = item.def_id.expect_local();
                 // Skip our own GAT, since it does not constrain itself at all.
                 if item_def_id == gat_def_id {
                     continue;
@@ -351,9 +352,9 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
                 }
 
                 let param_env = tcx.param_env(item_def_id);
-                let item_required_bounds = match item.kind {
+                let item_required_bounds = match tcx.associated_item(item_def_id).kind {
                     // In our example, this corresponds to `into_iter` method
-                    hir::AssocItemKind::Fn { .. } => {
+                    ty::AssocKind::Fn => {
                         // For methods, we check the function signature's return type for any GATs
                         // to constrain. In the `into_iter` case, we see that the return type
                         // `Self::Iter<'a>` is a GAT we want to gather any potential missing bounds from.
@@ -369,12 +370,12 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
                             // We also assume that all of the function signature's parameter types
                             // are well formed.
                             &sig.inputs().iter().copied().collect(),
-                            gat_def_id.def_id,
+                            gat_def_id,
                             gat_generics,
                         )
                     }
                     // In our example, this corresponds to the `Iter` and `Item` associated types
-                    hir::AssocItemKind::Type => {
+                    ty::AssocKind::Type => {
                         // If our associated item is a GAT with missing bounds, add them to
                         // the param-env here. This allows this GAT to propagate missing bounds
                         // to other GATs.
@@ -391,11 +392,11 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
                                 .instantiate_identity_iter_copied()
                                 .collect::<Vec<_>>(),
                             &FxIndexSet::default(),
-                            gat_def_id.def_id,
+                            gat_def_id,
                             gat_generics,
                         )
                     }
-                    hir::AssocItemKind::Const => None,
+                    ty::AssocKind::Const => None,
                 };
 
                 if let Some(item_required_bounds) = item_required_bounds {
@@ -431,7 +432,12 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
     }
 
     for (gat_def_id, required_bounds) in required_bounds_by_item {
-        let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id.def_id);
+        // Don't suggest adding `Self: 'a` to a GAT that can't be named
+        if tcx.is_impl_trait_in_trait(gat_def_id.to_def_id()) {
+            continue;
+        }
+
+        let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id);
         debug!(?required_bounds);
         let param_env = tcx.param_env(gat_def_id);
@@ -441,21 +447,16 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
                 ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => {
                     !region_known_to_outlive(
                         tcx,
-                        gat_def_id.def_id,
+                        gat_def_id,
                         param_env,
                         &FxIndexSet::default(),
                         a,
                         b,
                     )
                 }
-                ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(a, b)) => !ty_known_to_outlive(
-                    tcx,
-                    gat_def_id.def_id,
-                    param_env,
-                    &FxIndexSet::default(),
-                    a,
-                    b,
-                ),
+                ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(a, b)) => {
+                    !ty_known_to_outlive(tcx, gat_def_id, param_env, &FxIndexSet::default(), a, b)
+                }
                 _ => bug!("Unexpected ClauseKind"),
             })
             .map(|clause| clause.to_string())
@@ -534,7 +535,7 @@ fn augment_param_env<'tcx>(
 fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
     tcx: TyCtxt<'tcx>,
     param_env: ty::ParamEnv<'tcx>,
-    item_def_id: hir::OwnerId,
+    item_def_id: LocalDefId,
     to_check: T,
     wf_tys: &FxIndexSet<Ty<'tcx>>,
     gat_def_id: LocalDefId,
@@ -567,7 +568,7 @@ fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
     // reflected in a where clause on the GAT itself.
     for (ty, ty_idx) in &types {
         // In our example, requires that `Self: 'a`
-        if ty_known_to_outlive(tcx, item_def_id.def_id, param_env, &wf_tys, *ty, *region_a) {
+        if ty_known_to_outlive(tcx, item_def_id, param_env, &wf_tys, *ty, *region_a) {
             debug!(?ty_idx, ?region_a_idx);
             debug!("required clause: {ty} must outlive {region_a}");
             // Translate into the generic parameters of the GAT. In
@@ -606,14 +607,7 @@ fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
             if matches!(**region_b, ty::ReStatic | ty::ReError(_)) || region_a == region_b {
                 continue;
             }
-            if region_known_to_outlive(
-                tcx,
-                item_def_id.def_id,
-                param_env,
-                &wf_tys,
-                *region_a,
-                *region_b,
-            ) {
+            if region_known_to_outlive(tcx, item_def_id, param_env, &wf_tys, *region_a, *region_b) {
                 debug!(?region_a_idx, ?region_b_idx);
                 debug!("required clause: {region_a} must outlive {region_b}");
                 // Translate into the generic parameters of the GAT.
@@ -1114,8 +1108,8 @@ fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
     });
 
     // Only check traits, don't check trait aliases
-    if let hir::ItemKind::Trait(_, _, _, _, items) = item.kind {
-        check_gat_where_clauses(tcx, items);
+    if let hir::ItemKind::Trait(..) = item.kind {
+        check_gat_where_clauses(tcx, item.owner_id.def_id);
     }
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 506bcea0e39..1d1be8f2492 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -3,7 +3,7 @@
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_index::IndexVec;
 use rustc_middle::mir::{self, AggregateKind, Rvalue, Statement, StatementKind};
-use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
+use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol, DUMMY_SP};
 
 use super::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
 
@@ -215,9 +215,6 @@ struct CoverageSpansGenerator<'a> {
     /// is mutated.
     prev_original_span: Span,
 
-    /// A copy of the expn_span from the prior iteration.
-    prev_expn_span: Option<Span>,
-
     /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
     /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
     /// If a new `curr` span also fits this criteria (compared to an existing list of
@@ -272,13 +269,12 @@ pub(super) fn generate_coverage_spans(
             body_span,
             basic_coverage_blocks,
             sorted_spans_iter: sorted_spans.into_iter(),
-            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
             some_curr: None,
-            curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+            curr_original_span: DUMMY_SP,
             some_prev: None,
-            prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
-            prev_expn_span: None,
+            prev_original_span: DUMMY_SP,
             pending_dups: Vec::new(),
+            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
         };
 
         coverage_spans.to_refined_spans()
@@ -288,43 +284,48 @@
     /// de-duplicated `CoverageSpan`s.
     fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
         while self.next_coverage_span() {
+            // For the first span we don't have `prev` set, so most of the
+            // span-processing steps don't make sense yet.
             if self.some_prev.is_none() {
                 debug!("  initial span");
-                self.check_invoked_macro_name_span();
-            } else if self.curr().is_mergeable(self.prev()) {
-                debug!("  same bcb (and neither is a closure), merge with prev={:?}", self.prev());
+                self.maybe_push_macro_name_span();
+                continue;
+            }
+
+            // The remaining cases assume that `prev` and `curr` are set.
+            let prev = self.prev();
+            let curr = self.curr();
+
+            if curr.is_mergeable(prev) {
+                debug!("  same bcb (and neither is a closure), merge with prev={prev:?}");
                 let prev = self.take_prev();
                 self.curr_mut().merge_from(prev);
-                self.check_invoked_macro_name_span();
+                self.maybe_push_macro_name_span();
             // Note that curr.span may now differ from curr_original_span
-            } else if self.prev_ends_before_curr() {
+            } else if prev.span.hi() <= curr.span.lo() {
                 debug!(
-                    "  different bcbs and disjoint spans, so keep curr for next iter, and add \
-                    prev={:?}",
-                    self.prev()
+                    "  different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
                 );
                 let prev = self.take_prev();
                 self.push_refined_span(prev);
-                self.check_invoked_macro_name_span();
-            } else if self.prev().is_closure {
+                self.maybe_push_macro_name_span();
+            } else if prev.is_closure {
                 // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
                 // next iter
                 debug!(
-                    "  curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
-                    prev={:?}",
-                    self.prev()
+                    "  curr overlaps a closure (prev). Drop curr and keep prev for next iter. prev={prev:?}",
                 );
-                self.take_curr();
-            } else if self.curr().is_closure {
+                self.take_curr(); // Discards curr.
+            } else if curr.is_closure {
                 self.carve_out_span_for_closure();
-            } else if self.prev_original_span == self.curr().span {
+            } else if self.prev_original_span == curr.span {
                 // Note that this compares the new (`curr`) span to `prev_original_span`.
                 // In this branch, the actual span byte range of `prev_original_span` is not
                 // important. What is important is knowing whether the new `curr` span was
                 // **originally** the same as the original span of `prev()`. The original spans
                 // reflect their original sort order, and for equal spans, conveys a partial
                 // ordering based on CFG dominator priority.
-                if self.prev().is_macro_expansion() && self.curr().is_macro_expansion() {
+                if prev.is_macro_expansion() && curr.is_macro_expansion() {
                     // Macros that expand to include branching (such as
                     // `assert_eq!()`, `assert_ne!()`, `info!()`, `debug!()`, or
                     // `trace!()`) typically generate callee spans with identical
@@ -338,23 +339,24 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
                     debug!(
                         "  curr and prev are part of a macro expansion, and curr has the same span \
                         as prev, but is in a different bcb. Drop curr and keep prev for next iter. \
-                        prev={:?}",
-                        self.prev()
+                        prev={prev:?}",
                     );
-                    self.take_curr();
+                    self.take_curr(); // Discards curr.
                 } else {
-                    self.hold_pending_dups_unless_dominated();
+                    self.update_pending_dups();
                 }
             } else {
                 self.cutoff_prev_at_overlapping_curr();
-                self.check_invoked_macro_name_span();
+                self.maybe_push_macro_name_span();
             }
         }
 
-        debug!("  AT END, adding last prev={:?}", self.prev());
         let prev = self.take_prev();
-        let pending_dups = self.pending_dups.split_off(0);
-        for dup in pending_dups {
+        debug!("  AT END, adding last prev={prev:?}");
+
+        // Take `pending_dups` so that we can drain it while calling self methods.
+        // It is never used as a field after this point.
+        for dup in std::mem::take(&mut self.pending_dups) {
            debug!("  ...adding at least one pending dup={:?}", dup);
            self.push_refined_span(dup);
        }
@@ -384,43 +386,40 @@ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
     }
 
     fn push_refined_span(&mut self, covspan: CoverageSpan) {
-        let len = self.refined_spans.len();
-        if len > 0 {
-            let last = &mut self.refined_spans[len - 1];
-            if last.is_mergeable(&covspan) {
-                debug!(
-                    "merging new refined span with last refined span, last={:?}, covspan={:?}",
-                    last, covspan
-                );
-                last.merge_from(covspan);
-                return;
-            }
+        if let Some(last) = self.refined_spans.last_mut()
+            && last.is_mergeable(&covspan)
+        {
+            // Instead of pushing the new span, merge it with the last refined span.
+            debug!(?last, ?covspan, "merging new refined span with last refined span");
+            last.merge_from(covspan);
+        } else {
+            self.refined_spans.push(covspan);
         }
-        self.refined_spans.push(covspan)
     }
 
-    fn check_invoked_macro_name_span(&mut self) {
-        if let Some(visible_macro) = self.curr().visible_macro(self.body_span) {
-            if !self
-                .prev_expn_span
-                .is_some_and(|prev_expn_span| self.curr().expn_span.ctxt() == prev_expn_span.ctxt())
-            {
-                let merged_prefix_len = self.curr_original_span.lo() - self.curr().span.lo();
-                let after_macro_bang =
-                    merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
-                let mut macro_name_cov = self.curr().clone();
-                self.curr_mut().span =
-                    self.curr().span.with_lo(self.curr().span.lo() + after_macro_bang);
-                macro_name_cov.span =
-                    macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
-                debug!(
-                    "  and curr starts a new macro expansion, so add a new span just for \
-                    the macro `{}!`, new span={:?}",
-                    visible_macro, macro_name_cov
-                );
-                self.push_refined_span(macro_name_cov);
-            }
-        }
+    /// If `curr` is part of a new macro expansion, carve out and push a separate
+    /// span that ends just after the macro name and its subsequent `!`.
+    fn maybe_push_macro_name_span(&mut self) {
+        let curr = self.curr();
+
+        let Some(visible_macro) = curr.visible_macro(self.body_span) else { return };
+        if let Some(prev) = &self.some_prev
+            && prev.expn_span.ctxt() == curr.expn_span.ctxt()
+        {
+            return;
+        }
+
+        let merged_prefix_len = self.curr_original_span.lo() - curr.span.lo();
+        let after_macro_bang = merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
+        let mut macro_name_cov = curr.clone();
+        self.curr_mut().span = curr.span.with_lo(curr.span.lo() + after_macro_bang);
+        macro_name_cov.span =
+            macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
+        debug!(
+            "  and curr starts a new macro expansion, so add a new span just for \
+            the macro `{visible_macro}!`, new span={macro_name_cov:?}",
+        );
+        self.push_refined_span(macro_name_cov);
     }
 
     fn curr(&self) -> &CoverageSpan {
@@ -435,6 +434,12 @@ fn curr_mut(&mut self) -> &mut CoverageSpan {
             .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
     }
 
+    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
+    /// `curr` coverage span.
+    fn take_curr(&mut self) -> CoverageSpan {
+        self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+    }
+
     fn prev(&self) -> &CoverageSpan {
         self.some_prev
             .as_ref()
@@ -460,84 +465,78 @@ fn take_prev(&mut self) -> CoverageSpan {
     /// `pending_dups` could have as few as one span)
     /// In either case, no more spans will match the span of `pending_dups`, so
     /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
-    fn check_pending_dups(&mut self) {
-        if let Some(dup) = self.pending_dups.last()
-            && dup.span != self.prev().span
-        {
-            debug!(
-                "  SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
-                previous iteration, or prev started a new disjoint span"
-            );
-            if dup.span.hi() <= self.curr().span.lo() {
-                let pending_dups = self.pending_dups.split_off(0);
-                for dup in pending_dups.into_iter() {
-                    debug!("  ...adding at least one pending={:?}", dup);
-                    self.push_refined_span(dup);
-                }
-            } else {
-                self.pending_dups.clear();
+    fn maybe_flush_pending_dups(&mut self) {
+        let Some(last_dup) = self.pending_dups.last() else { return };
+        if last_dup.span == self.prev().span {
+            return;
+        }
+
+        debug!(
+            "  SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
+            previous iteration, or prev started a new disjoint span"
+        );
+        if last_dup.span.hi() <= self.curr().span.lo() {
+            // Temporarily steal `pending_dups` into a local, so that we can
+            // drain it while calling other self methods.
+            let mut pending_dups = std::mem::take(&mut self.pending_dups);
+            for dup in pending_dups.drain(..) {
+                debug!("  ...adding at least one pending={:?}", dup);
+                self.push_refined_span(dup);
             }
+            // The list of dups is now empty, but we can recycle its capacity.
+            assert!(pending_dups.is_empty() && self.pending_dups.is_empty());
+            self.pending_dups = pending_dups;
+        } else {
+            self.pending_dups.clear();
         }
     }
 
     /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
     fn next_coverage_span(&mut self) -> bool {
         if let Some(curr) = self.some_curr.take() {
-            self.prev_expn_span = Some(curr.expn_span);
             self.some_prev = Some(curr);
             self.prev_original_span = self.curr_original_span;
         }
         while let Some(curr) = self.sorted_spans_iter.next() {
             debug!("FOR curr={:?}", curr);
-            if self.some_prev.is_some() && self.prev_starts_after_next(&curr) {
+            if let Some(prev) = &self.some_prev && prev.span.lo() > curr.span.lo() {
+                // Skip curr because prev has already advanced beyond the end of curr.
+                // This can only happen if a prior iteration updated `prev` to skip past
+                // a region of code, such as skipping past a closure.
                 debug!(
                     "  prev.span starts after curr.span, so curr will be dropped (skipping past \
-                    closure?); prev={:?}",
-                    self.prev()
+                    closure?); prev={prev:?}",
                 );
             } else {
                 // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
                 // by `self.curr_mut().merge_from(prev)`.
                 self.curr_original_span = curr.span;
                 self.some_curr.replace(curr);
-                self.check_pending_dups();
+                self.maybe_flush_pending_dups();
                 return true;
             }
         }
         false
     }
 
-    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
-    /// `curr` coverage span.
-    fn take_curr(&mut self) -> CoverageSpan {
-        self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
-    }
-
-    /// Returns true if the curr span should be skipped because prev has already advanced beyond the
-    /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
-    /// of code, such as skipping past a closure.
-    fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
-        self.prev().span.lo() > next_curr.span.lo()
-    }
-
-    /// Returns true if the curr span starts past the end of the prev span, which means they don't
-    /// overlap, so we now know the prev can be added to the refined coverage spans.
-    fn prev_ends_before_curr(&self) -> bool {
-        self.prev().span.hi() <= self.curr().span.lo()
-    }
-
     /// If `prev`s span extends left of the closure (`curr`), carve out the closure's span from
     /// `prev`'s span. (The closure's coverage counters will be injected when processing the
     /// closure's own MIR.) Add the portion of the span to the left of the closure; and if the span
     /// extends to the right of the closure, update `prev` to that portion of the span. For any
     /// `pending_dups`, repeat the same process.
     fn carve_out_span_for_closure(&mut self) {
-        let curr_span = self.curr().span;
-        let left_cutoff = curr_span.lo();
-        let right_cutoff = curr_span.hi();
-        let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
-        let has_post_closure_span = self.prev().span.hi() > right_cutoff;
-        let mut pending_dups = self.pending_dups.split_off(0);
+        let prev = self.prev();
+        let curr = self.curr();
+
+        let left_cutoff = curr.span.lo();
+        let right_cutoff = curr.span.hi();
+        let has_pre_closure_span = prev.span.lo() < right_cutoff;
+        let has_post_closure_span = prev.span.hi() > right_cutoff;
+
+        // Temporarily steal `pending_dups` into a local, so that we can
+        // mutate and/or drain it while calling other self methods.
+        let mut pending_dups = std::mem::take(&mut self.pending_dups);
+
         if has_pre_closure_span {
             let mut pre_closure = self.prev().clone();
             pre_closure.span = pre_closure.span.with_hi(left_cutoff);
@@ -551,6 +550,7 @@ fn carve_out_span_for_closure(&mut self) {
             }
             self.push_refined_span(pre_closure);
         }
+
         if has_post_closure_span {
             // Mutate `prev.span()` to start after the closure (and discard curr).
             // (**NEVER** update `prev_original_span` because it affects the assumptions
@@ -561,12 +561,15 @@ fn carve_out_span_for_closure(&mut self) {
                 debug!("  ...and at least one overlapping dup={:?}", dup);
                 dup.span = dup.span.with_lo(right_cutoff);
             }
-            self.pending_dups.append(&mut pending_dups);
-            let closure_covspan = self.take_curr();
+            let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
             self.push_refined_span(closure_covspan); // since self.prev() was already updated
         } else {
             pending_dups.clear();
         }
+
+        // Restore the modified post-closure spans, or the empty vector's capacity.
+        assert!(self.pending_dups.is_empty());
+        self.pending_dups = pending_dups;
     }
 
     /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
@@ -583,26 +586,28 @@ fn carve_out_span_for_closure(&mut self) {
     /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
     /// until their disposition is determined. In this latter case, the `prev` dup is moved into
     /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
-    fn hold_pending_dups_unless_dominated(&mut self) {
+    fn update_pending_dups(&mut self) {
+        let prev_bcb = self.prev().bcb;
+        let curr_bcb = self.curr().bcb;
+
         // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
         // impossible for `curr` to dominate any previous `CoverageSpan`.
-        debug_assert!(!self.span_bcb_dominates(self.curr(), self.prev()));
+        debug_assert!(!self.basic_coverage_blocks.dominates(curr_bcb, prev_bcb));
 
         let initial_pending_count = self.pending_dups.len();
         if initial_pending_count > 0 {
-            let mut pending_dups = self.pending_dups.split_off(0);
-            pending_dups.retain(|dup| !self.span_bcb_dominates(dup, self.curr()));
-            self.pending_dups.append(&mut pending_dups);
-            if self.pending_dups.len() < initial_pending_count {
+            self.pending_dups
+                .retain(|dup| !self.basic_coverage_blocks.dominates(dup.bcb, curr_bcb));
+
+            let n_discarded = initial_pending_count - self.pending_dups.len();
+            if n_discarded > 0 {
                 debug!(
-                    "  discarded {} of {} pending_dups that dominated curr",
-                    initial_pending_count - self.pending_dups.len(),
-                    initial_pending_count
+                    "  discarded {n_discarded} of {initial_pending_count} pending_dups that dominated curr",
                 );
             }
         }
 
-        if self.span_bcb_dominates(self.prev(), self.curr()) {
+        if self.basic_coverage_blocks.dominates(prev_bcb, curr_bcb) {
             debug!(
                 "  different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
                 self.prev()
@@ -667,8 +672,4 @@ fn cutoff_prev_at_overlapping_curr(&mut self) {
             self.pending_dups.clear();
         }
     }
-
-    fn span_bcb_dominates(&self, dom_covspan: &CoverageSpan, covspan: &CoverageSpan) -> bool {
-        self.basic_coverage_blocks.dominates(dom_covspan.bcb, covspan.bcb)
-    }
 }
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
index cbfb06eac7b..9c9b78f4152 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
@@ -180,8 +180,9 @@ fn on_unimplemented_note(
             flags.push((sym::cause, Some("MainFunctionType".to_string())));
         }
 
-        // Add all types without trimmed paths.
-        ty::print::with_no_trimmed_paths!({
+        // Add all types without trimmed paths or visible paths, ensuring they end up with
+        // their "canonical" def path.
+        ty::print::with_no_trimmed_paths!(ty::print::with_no_visible_paths!({
             let generics = self.tcx.generics_of(def_id);
             let self_ty = trait_ref.self_ty();
             // This is also included through the generics list as `Self`,
@@ -296,7 +297,7 @@ fn on_unimplemented_note(
             {
                 flags.push((sym::_Self, Some("&[{integral}]".to_owned())));
             }
-        });
+        }));
 
         if let Ok(Some(command)) = OnUnimplementedDirective::of_item(self.tcx, def_id) {
             command.evaluate(self.tcx, trait_ref, &flags)
@@ -578,7 +579,9 @@ pub fn evaluate(
             Some(tcx.features()),
             &mut |cfg| {
                 let value = cfg.value.map(|v| {
-                    OnUnimplementedFormatString(v).format(tcx, trait_ref, &options_map)
+                    // `with_no_visible_paths` is also used when generating the options,
+                    // so we need to match it here.
+                    ty::print::with_no_visible_paths!(OnUnimplementedFormatString(v).format(tcx, trait_ref, &options_map))
                 });
 
                 options.contains(&(cfg.name, value))
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index 9407c1609c2..89125b7955e 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -573,7 +573,7 @@ pub trait Into<T>: Sized {
 #[rustc_diagnostic_item = "From"]
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_on_unimplemented(on(
-    all(_Self = "&str", any(T = "alloc::string::String", T = "std::string::String")),
+    all(_Self = "&str", T = "alloc::string::String"),
     note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
 ))]
 pub trait From<T>: Sized {
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index ac1fc26a1ef..c7ace58afa8 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -27,13 +27,13 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_on_unimplemented(
     on(
-        any(_Self = "core::ops::RangeTo<Idx>", _Self = "std::ops::RangeTo<Idx>"),
+        _Self = "core::ops::range::RangeTo<Idx>",
         label = "if you meant to iterate until a value, add a starting value",
         note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
               bounded `Range`: `0..end`"
     ),
     on(
-        any(_Self = "core::ops::RangeToInclusive<Idx>", _Self = "std::ops::RangeToInclusive<Idx>"),
+        _Self = "core::ops::range::RangeToInclusive<Idx>",
         label = "if you meant to iterate until a value (including it), add a starting value",
         note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
               to have a bounded `RangeInclusive`: `0..=end`"
@@ -44,7 +44,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
     ),
     on(_Self = "&[]", label = "`{Self}` is not an iterator; try calling `.iter()`"),
     on(
-        any(_Self = "alloc::vec::Vec<T, A>", _Self = "std::vec::Vec<T, A>"),
+        _Self = "alloc::vec::Vec<T, A>",
         label = "`{Self}` is not an iterator; try calling `.into_iter()` or `.iter()`"
     ),
     on(
@@ -52,7 +52,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
         label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
     ),
     on(
-        any(_Self = "alloc::string::String", _Self = "std::string::String"),
+        _Self = "alloc::string::String",
         label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
     ),
     on(
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 5ed82e26a0a..f1594501d40 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -573,59 +573,59 @@ impl<T: ?Sized> Copy for &T {}
 #[lang = "sync"]
 #[rustc_on_unimplemented(
     on(
-        any(_Self = "core::cell:OnceCell<T>", _Self = "std::cell::OnceCell<T>"),
+        _Self = "core::cell::once::OnceCell<T>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::OnceLock` instead"
     ),
     on(
-        any(_Self = "core::cell::Cell<u8>", _Self = "std::cell::Cell<u8>"),
+        _Self = "core::cell::Cell<u8>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU8` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<u16>", _Self = "std::cell::Cell<u16>"),
+        _Self = "core::cell::Cell<u16>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU16` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<u32>", _Self = "std::cell::Cell<u32>"),
+        _Self = "core::cell::Cell<u32>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU32` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<u64>", _Self = "std::cell::Cell<u64>"),
+        _Self = "core::cell::Cell<u64>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU64` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<usize>", _Self = "std::cell::Cell<usize>"),
+        _Self = "core::cell::Cell<usize>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicUsize` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<i8>", _Self = "std::cell::Cell<i8>"),
+        _Self = "core::cell::Cell<i8>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI8` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<i16>", _Self = "std::cell::Cell<i16>"),
+        _Self = "core::cell::Cell<i16>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI16` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<i32>", _Self = "std::cell::Cell<i32>"),
+        _Self = "core::cell::Cell<i32>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI32` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<i64>", _Self = "std::cell::Cell<i64>"),
+        _Self = "core::cell::Cell<i64>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI64` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<isize>", _Self = "std::cell::Cell<isize>"),
+        _Self = "core::cell::Cell<isize>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicIsize` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<bool>", _Self = "std::cell::Cell<bool>"),
+        _Self = "core::cell::Cell<bool>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicBool` instead",
     ),
     on(
-        any(_Self = "core::cell::Cell<T>", _Self = "std::cell::Cell<T>"),
+        _Self = "core::cell::Cell<T>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock`",
     ),
     on(
-        any(_Self = "core::cell::RefCell<T>", _Self = "std::cell::RefCell<T>"),
+        _Self = "core::cell::RefCell<T>",
         note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` instead",
     ),
     message = "`{Self}` cannot be shared between threads safely",
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index f4649be54d5..6ceee463729 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -153,7 +153,7 @@ pub trait Index<Idx: ?Sized> {
              see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
     ),
     on(
-        any(_Self = "alloc::string::String", _Self = "std::string::String"),
+        _Self = "alloc::string::String",
         note = "you can use `.chars().nth()` or `.bytes().nth()`
              see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
     ),
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
index 17625daccbc..3f8c8efd416 100644
--- a/library/core/src/ops/try_trait.rs
+++ b/library/core/src/ops/try_trait.rs
@@ -226,14 +226,8 @@ pub trait Try: FromResidual {
     on(
         all(
             from_desugaring = "QuestionMark",
-            any(
-                _Self = "core::result::Result<T, E>",
-                _Self = "std::result::Result<T, E>",
-            ),
-            any(
-                R = "core::option::Option<core::convert::Infallible>",
-                R = "std::option::Option<std::convert::Infallible>",
-            )
+            _Self = "core::result::Result<T, E>",
+            R = "core::option::Option<core::convert::Infallible>",
         ),
         message = "the `?` operator can only be used on `Result`s, not `Option`s, \
             in {ItemContext} that returns `Result`",
@@ -243,10 +237,7 @@ pub trait Try: FromResidual {
     on(
         all(
             from_desugaring = "QuestionMark",
-            any(
-                _Self = "core::result::Result<T, E>",
-                _Self = "std::result::Result<T, E>",
-            )
+            _Self = "core::result::Result<T, E>",
         ),
         // There's a special error message in the trait selection code for
         // `From` in `?`, so this is not shown for result-in-result errors,
@@ -259,14 +250,8 @@ pub trait Try: FromResidual {
     on(
         all(
             from_desugaring = "QuestionMark",
-            any(
-                _Self = "core::option::Option<T>",
-                _Self = "std::option::Option<T>",
-            ),
-            any(
-                R = "core::result::Result<T, E>",
-                R = "std::result::Result<T, E>",
-            )
+            _Self = "core::option::Option<T>",
+            R = "core::result::Result<T, E>",
         ),
         message = "the `?` operator can only be used on `Option`s, not `Result`s, \
             in {ItemContext} that returns `Option`",
@@ -276,10 +261,7 @@ pub trait Try: FromResidual {
     on(
         all(
             from_desugaring = "QuestionMark",
-            any(
-                _Self = "core::option::Option<T>",
-                _Self = "std::option::Option<T>",
-            )
+            _Self = "core::option::Option<T>",
         ),
         // `Option`-in-`Option` always works, as there's only one possible
         // residual, so this can also be phrased strongly.
@@ -291,14 +273,8 @@ pub trait Try: FromResidual {
     on(
         all(
             from_desugaring = "QuestionMark",
-            any(
-                _Self = "core::ops::ControlFlow<B, C>",
-                _Self = "std::ops::ControlFlow<B, C>",
-            ),
-            any(
-                R = "core::ops::ControlFlow<B, C>",
-                R = "std::ops::ControlFlow<B, C>",
-            )
+            _Self = "core::ops::control_flow::ControlFlow<B, C>",
+            R = "core::ops::control_flow::ControlFlow<B, C>",
         ),
         message = "the `?` operator in {ItemContext} that returns `ControlFlow<B, _>` \
             can only be used on other `ControlFlow<B, _>`s (with the same Break type)",
@@ -309,10 +285,7 @@ pub trait Try: FromResidual {
     on(
         all(
             from_desugaring = "QuestionMark",
-            any(
-                _Self = "core::ops::ControlFlow<B, C>",
-                _Self = "std::ops::ControlFlow<B, C>",
-            )
+            _Self = "core::ops::control_flow::ControlFlow<B, C>",
             // `R` is not a `ControlFlow`, as that case was matched previously
         ),
         message = "the `?` operator can only be used on `ControlFlow`s \
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index d313e8e012f..1da3a87e117 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -152,10 +152,7 @@ impl Sealed for ops::IndexRange {}
 #[rustc_on_unimplemented(
     on(T = "str", label = "string indices are ranges of `usize`",),
     on(
-        all(
-            any(T = "str", T = "&str", T = "alloc::string::String", T = "std::string::String"),
-            _Self = "{integer}"
-        ),
+        all(any(T = "str", T = "&str", T = "alloc::string::String"), _Self = "{integer}"),
         note = "you can use `.chars().nth()` or `.bytes().nth()`\n\
                 for more information, see chapter 8 in The Book: \
                 <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
diff --git a/src/tools/rustdoc-gui/tester.js b/src/tools/rustdoc-gui/tester.js
index af1bc05ddb2..8f6626f6296 100644
--- a/src/tools/rustdoc-gui/tester.js
+++ b/src/tools/rustdoc-gui/tester.js
@@ -249,12 +249,17 @@ async function main(argv) {
        console.log("`--no-headless` option is active, disabling concurrency for running tests.");
    }
 
-    console.log(`Running ${files.length} rustdoc-gui (${opts["jobs"]} concurrently) ...`);
     if (opts["jobs"] < 1) {
+        const len = files.length;
+        console.log(
+            `Running ${len} rustdoc-gui (UNBOUNDED concurrency; use "-j#" for a limit) ...`,
+        );
         process.setMaxListeners(files.length + 1);
     } else if (headless) {
+        console.log(`Running ${files.length} rustdoc-gui (${opts["jobs"]} concurrently) ...`);
         process.setMaxListeners(opts["jobs"] + 1);
+    } else {
+        console.log(`Running ${files.length} rustdoc-gui ...`);
     }
 
     // We catch this "event" to display a nicer message in case of unexpected exit (because of a
diff --git a/tests/pretty/format-args-str-escape.pp b/tests/pretty/format-args-str-escape.pp
new file mode 100644
index 00000000000..b84bc2303b7
--- /dev/null
+++ b/tests/pretty/format-args-str-escape.pp
@@ -0,0 +1,21 @@
+#![feature(prelude_import)]
+#![no_std]
+#[prelude_import]
+use ::std::prelude::rust_2015::*;
+#[macro_use]
+extern crate std;
+// pretty-compare-only
+// pretty-mode:expanded
+// pp-exact:format-args-str-escape.pp
+
+fn main() {
+    { ::std::io::_print(format_args!("\u{1b}[1mHello, world!\u{1b}[0m\n")); };
+    { ::std::io::_print(format_args!("\u{1b}[1mHello, world!\u{1b}[0m\n")); };
+    {
+        ::std::io::_print(format_args!("Not an escape sequence: \\u{{1B}}[1mbold\\x1B[0m\n"));
+    };
+    {
+        ::std::io::_print(format_args!("{0}\n",
+                "\x1B[1mHello, world!\x1B[0m"));
+    };
+}
diff --git a/tests/pretty/format-args-str-escape.rs b/tests/pretty/format-args-str-escape.rs
new file mode 100644
index 00000000000..e596fcfd8bc
--- /dev/null
+++ b/tests/pretty/format-args-str-escape.rs
@@ -0,0 +1,10 @@
+// pretty-compare-only
+// pretty-mode:expanded
+// pp-exact:format-args-str-escape.pp
+
+fn main() {
+    println!("\x1B[1mHello, world!\x1B[0m");
+    println!("\u{1B}[1mHello, world!\u{1B}[0m");
+    println!("Not an escape sequence: \\u{{1B}}[1mbold\\x1B[0m");
+    println!("{}", "\x1B[1mHello, world!\x1B[0m");
+}
diff --git a/tests/ui/impl-trait/in-trait/gat-outlives.rs b/tests/ui/impl-trait/in-trait/gat-outlives.rs
new file mode 100644
index 00000000000..83dd6cfce53
--- /dev/null
+++ b/tests/ui/impl-trait/in-trait/gat-outlives.rs
@@ -0,0 +1,17 @@
+// edition: 2021
+
+use std::future::Future;
+
+trait Trait {
+    type Gat<'a>;
+    //~^ ERROR missing required bound on `Gat`
+    async fn foo(&self) -> Self::Gat<'_>;
+}
+
+trait Trait2 {
+    type Gat<'a>;
+    //~^ ERROR missing required bound on `Gat`
+    async fn foo(&self) -> impl Future<Output = Self::Gat<'_>>;
+}
+
+fn main() {}
diff --git a/tests/ui/impl-trait/in-trait/gat-outlives.stderr b/tests/ui/impl-trait/in-trait/gat-outlives.stderr
new file mode 100644
index 00000000000..8ec4b0ab2ee
--- /dev/null
+++ b/tests/ui/impl-trait/in-trait/gat-outlives.stderr
@@ -0,0 +1,24 @@
+error: missing required bound on `Gat`
+  --> $DIR/gat-outlives.rs:6:5
+   |
+LL |     type Gat<'a>;
+   |     ^^^^^^^^^^^^-
+   |                 |
+   |                 help: add the required where clause: `where Self: 'a`
+   |
+   = note: this bound is currently required to ensure that impls have maximum flexibility
+   = note: we are soliciting feedback, see issue #87479 <https://github.com/rust-lang/rust/issues/87479> for more information
+
+error: missing required bound on `Gat`
+  --> $DIR/gat-outlives.rs:12:5
+   |
+LL |     type Gat<'a>;
+   |     ^^^^^^^^^^^^-
+   |                 |
+   |                 help: add the required where clause: `where Self: 'a`
+   |
+   = note: this bound is currently required to ensure that impls have maximum flexibility
+   = note: we are soliciting feedback, see issue #87479 <https://github.com/rust-lang/rust/issues/87479> for more information
+
+error: aborting due to 2 previous errors
+
diff --git a/triagebot.toml b/triagebot.toml
index b17b415c0ff..fbdc28787ff 100644
--- a/triagebot.toml
+++ b/triagebot.toml
@@ -556,7 +556,7 @@ cc = ["@davidtwco", "@compiler-errors", "@JohnTitor", "@TaKO8Ki"]
 
 [mentions."compiler/rustc_smir"]
 message = "This PR changes Stable MIR"
-cc = ["@oli-obk", "@celinval", "@spastorino"]
+cc = ["@oli-obk", "@celinval", "@spastorino", "@ouz-a"]
 
 [mentions."compiler/stable_mir"]
 message = "This PR changes Stable MIR"