Auto merge of #114492 - compiler-errors:rollup-lp4sfla, r=compiler-errors
Rollup of 5 pull requests

Successful merges:

 - #114287 (update overflow handling in the new trait solver)
 - #114475 (Migrate GUI colors test to original CSS color format)
 - #114482 (Fix ui-fulldeps missing the `internal_features` lint on stage 0)
 - #114490 (Fix a typo in the error reporting for sealed traits.)
 - #114491 (Rename issue #114423 test files to include context)

r? `@ghost`
`@rustbot` modify labels: rollup
commit fca59ab5f0
@@ -1,7 +1,6 @@
 use std::ops::ControlFlow;
 
 use rustc_data_structures::intern::Interned;
-use rustc_query_system::cache::Cache;
 
 use crate::infer::canonical::{CanonicalVarValues, QueryRegionConstraints};
 use crate::traits::query::NoSolution;
@@ -11,9 +10,10 @@
     TypeVisitor,
 };
 
+mod cache;
 pub mod inspect;
 
-pub type EvaluationCache<'tcx> = Cache<CanonicalInput<'tcx>, QueryResult<'tcx>>;
+pub use cache::{CacheData, EvaluationCache};
 
 /// A goal is a statement, i.e. `predicate`, we want to prove
 /// given some assumptions, i.e. `param_env`.
compiler/rustc_middle/src/traits/solve/cache.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
+use super::{CanonicalInput, QueryResult};
+use crate::ty::TyCtxt;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lock;
+use rustc_query_system::cache::WithDepNode;
+use rustc_query_system::dep_graph::DepNodeIndex;
+use rustc_session::Limit;
+/// The trait solver cache used by `-Ztrait-solver=next`.
+///
+/// FIXME(@lcnr): link to some official documentation of how
+/// this works.
+#[derive(Default)]
+pub struct EvaluationCache<'tcx> {
+    map: Lock<FxHashMap<CanonicalInput<'tcx>, CacheEntry<'tcx>>>,
+}
+
+pub struct CacheData<'tcx> {
+    pub result: QueryResult<'tcx>,
+    pub reached_depth: usize,
+    pub encountered_overflow: bool,
+}
+
+impl<'tcx> EvaluationCache<'tcx> {
+    /// Insert a final result into the global cache.
+    pub fn insert(
+        &self,
+        key: CanonicalInput<'tcx>,
+        reached_depth: usize,
+        did_overflow: bool,
+        cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
+        dep_node: DepNodeIndex,
+        result: QueryResult<'tcx>,
+    ) {
+        let mut map = self.map.borrow_mut();
+        let entry = map.entry(key).or_default();
+        let data = WithDepNode::new(dep_node, result);
+        entry.cycle_participants.extend(cycle_participants);
+        if did_overflow {
+            entry.with_overflow.insert(reached_depth, data);
+        } else {
+            entry.success = Some(Success { data, reached_depth });
+        }
+    }
+
+    /// Try to fetch a cached result, checking the recursion limit
+    /// and handling root goals of coinductive cycles.
+    ///
+    /// If this returns `Some` the cache result can be used.
+    pub fn get(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        key: CanonicalInput<'tcx>,
+        cycle_participant_in_stack: impl FnOnce(&FxHashSet<CanonicalInput<'tcx>>) -> bool,
+        available_depth: Limit,
+    ) -> Option<CacheData<'tcx>> {
+        let map = self.map.borrow();
+        let entry = map.get(&key)?;
+
+        if cycle_participant_in_stack(&entry.cycle_participants) {
+            return None;
+        }
+
+        if let Some(ref success) = entry.success {
+            if available_depth.value_within_limit(success.reached_depth) {
+                return Some(CacheData {
+                    result: success.data.get(tcx),
+                    reached_depth: success.reached_depth,
+                    encountered_overflow: false,
+                });
+            }
+        }
+
+        entry.with_overflow.get(&available_depth.0).map(|e| CacheData {
+            result: e.get(tcx),
+            reached_depth: available_depth.0,
+            encountered_overflow: true,
+        })
+    }
+}
+
+struct Success<'tcx> {
+    data: WithDepNode<QueryResult<'tcx>>,
+    reached_depth: usize,
+}
+
+/// The cache entry for a goal `CanonicalInput`.
+///
+/// This contains results whose computation never hit the
+/// recursion limit in `success`, and all results which hit
+/// the recursion limit in `with_overflow`.
+#[derive(Default)]
+struct CacheEntry<'tcx> {
+    success: Option<Success<'tcx>>,
+    /// We have to be careful when caching roots of cycles.
+    ///
+    /// See the doc comment of `StackEntry::cycle_participants` for more
+    /// details.
+    cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
+    with_overflow: FxHashMap<usize, WithDepNode<QueryResult<'tcx>>>,
+}
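As an aside (not part of the commit): the shape of this cache can be illustrated with a small standalone sketch. The types below (`SimpleCache`, `Entry`, `GoalKey`, `Answer`) are hypothetical stand-ins for `EvaluationCache`, `CacheEntry`, `CanonicalInput`, and `QueryResult`; the point is only the split between one depth-independent `success` slot and a per-depth `with_overflow` map keyed by the depth budget that was available when the result was computed.

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for `CanonicalInput` / `QueryResult`.
type GoalKey = u64;
type Answer = &'static str;

#[derive(Default)]
struct Entry {
    // Result that never hit the recursion limit: reusable whenever the caller
    // still has at least `reached_depth` depth available.
    success: Option<(usize, Answer)>,
    // Results that did hit the limit, keyed by the exact depth budget they
    // were computed with.
    with_overflow: HashMap<usize, Answer>,
}

#[derive(Default)]
struct SimpleCache {
    map: HashMap<GoalKey, Entry>,
}

impl SimpleCache {
    fn insert(&mut self, key: GoalKey, reached_depth: usize, overflowed: bool, answer: Answer) {
        let entry = self.map.entry(key).or_default();
        if overflowed {
            entry.with_overflow.insert(reached_depth, answer);
        } else {
            entry.success = Some((reached_depth, answer));
        }
    }

    // Returns the cached answer plus whether it was an overflow result.
    fn get(&self, key: GoalKey, available_depth: usize) -> Option<(Answer, bool)> {
        let entry = self.map.get(&key)?;
        if let Some((reached_depth, answer)) = entry.success {
            if reached_depth <= available_depth {
                return Some((answer, false));
            }
        }
        entry.with_overflow.get(&available_depth).map(|&a| (a, true))
    }
}

fn main() {
    let mut cache = SimpleCache::default();
    cache.insert(1, 3, false, "proven");
    cache.insert(2, 8, true, "overflow result at depth 8");

    // A caller with plenty of depth left can reuse the non-overflow result.
    assert_eq!(cache.get(1, 10), Some(("proven", false)));
    // A caller with less depth than the result needed cannot.
    assert_eq!(cache.get(1, 2), None);
    // Overflow results are only reused at the exact same depth budget.
    assert_eq!(cache.get(2, 8), Some(("overflow result at depth 8", true)));
    assert_eq!(cache.get(2, 9), None);
}
```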
@@ -1338,12 +1338,25 @@ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
     }
 }
 
+impl<'tcx> ToPredicate<'tcx> for ProjectionPredicate<'tcx> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        ty::Binder::dummy(PredicateKind::Clause(ClauseKind::Projection(self))).to_predicate(tcx)
+    }
+}
+
 impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
     fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
         self.map_bound(|p| PredicateKind::Clause(ClauseKind::Projection(p))).to_predicate(tcx)
     }
 }
 
+impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for ProjectionPredicate<'tcx> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
+        let p: Predicate<'tcx> = self.to_predicate(tcx);
+        p.expect_clause()
+    }
+}
+
 impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyProjectionPredicate<'tcx> {
     fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
         let p: Predicate<'tcx> = self.to_predicate(tcx);
@@ -162,7 +162,7 @@ fn normalizes_to_inner(
         self.add_goal(Goal::new(
             self.tcx(),
             param_env,
-            ty::Binder::dummy(ty::ProjectionPredicate { projection_ty: alias, term: other }),
+            ty::ProjectionPredicate { projection_ty: alias, term: other },
         ));
 
         Ok(())
@@ -1,6 +1,5 @@
 //! Code shared by trait and projection goals for candidate assembly.
 
-use super::search_graph::OverflowHandler;
 use super::{EvalCtxt, SolverMode};
 use crate::traits::coherence;
 use rustc_hir::def_id::DefId;
@@ -315,7 +314,7 @@ pub(super) fn assemble_and_evaluate_candidates<G: GoalKind<'tcx>>(
             return ambig;
         }
 
-        let mut candidates = self.assemble_candidates_via_self_ty(goal);
+        let mut candidates = self.assemble_candidates_via_self_ty(goal, 0);
 
         self.assemble_blanket_impl_candidates(goal, &mut candidates);
 
@@ -351,6 +350,7 @@ fn assemble_self_ty_infer_ambiguity_response<G: GoalKind<'tcx>>(
     fn assemble_candidates_via_self_ty<G: GoalKind<'tcx>>(
         &mut self,
         goal: Goal<'tcx, G>,
+        num_steps: usize,
     ) -> Vec<Candidate<'tcx>> {
         debug_assert_eq!(goal, self.resolve_vars_if_possible(goal));
         if let Some(ambig) = self.assemble_self_ty_infer_ambiguity_response(goal) {
@@ -369,7 +369,7 @@ fn assemble_candidates_via_self_ty<G: GoalKind<'tcx>>(
 
         self.assemble_coherence_unknowable_candidates(goal, &mut candidates);
 
-        self.assemble_candidates_after_normalizing_self_ty(goal, &mut candidates);
+        self.assemble_candidates_after_normalizing_self_ty(goal, &mut candidates, num_steps);
 
         candidates
     }
@@ -393,49 +393,40 @@ fn assemble_candidates_after_normalizing_self_ty<G: GoalKind<'tcx>>(
         &mut self,
         goal: Goal<'tcx, G>,
         candidates: &mut Vec<Candidate<'tcx>>,
+        num_steps: usize,
     ) {
         let tcx = self.tcx();
         let &ty::Alias(_, projection_ty) = goal.predicate.self_ty().kind() else { return };
 
-        let normalized_self_candidates: Result<_, NoSolution> =
-            self.probe(|_| CandidateKind::NormalizedSelfTyAssembly).enter(|ecx| {
-                ecx.with_incremented_depth(
-                    |ecx| {
-                        let result = ecx.evaluate_added_goals_and_make_canonical_response(
-                            Certainty::OVERFLOW,
-                        )?;
-                        Ok(vec![Candidate {
-                            source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
-                            result,
-                        }])
-                    },
-                    |ecx| {
-                        let normalized_ty = ecx.next_ty_infer();
-                        let normalizes_to_goal = goal.with(
-                            tcx,
-                            ty::Binder::dummy(ty::ProjectionPredicate {
-                                projection_ty,
-                                term: normalized_ty.into(),
-                            }),
-                        );
-                        ecx.add_goal(normalizes_to_goal);
-                        let _ = ecx.try_evaluate_added_goals().inspect_err(|_| {
-                            debug!("self type normalization failed");
-                        })?;
-                        let normalized_ty = ecx.resolve_vars_if_possible(normalized_ty);
-                        debug!(?normalized_ty, "self type normalized");
-                        // NOTE: Alternatively we could call `evaluate_goal` here and only
-                        // have a `Normalized` candidate. This doesn't work as long as we
-                        // use `CandidateSource` in winnowing.
-                        let goal = goal.with(tcx, goal.predicate.with_self_ty(tcx, normalized_ty));
-                        Ok(ecx.assemble_candidates_via_self_ty(goal))
-                    },
-                )
-            });
-
-        if let Ok(normalized_self_candidates) = normalized_self_candidates {
-            candidates.extend(normalized_self_candidates);
-        }
+        candidates.extend(self.probe(|_| CandidateKind::NormalizedSelfTyAssembly).enter(|ecx| {
+            if num_steps < ecx.local_overflow_limit() {
+                let normalized_ty = ecx.next_ty_infer();
+                let normalizes_to_goal = goal.with(
+                    tcx,
+                    ty::ProjectionPredicate { projection_ty, term: normalized_ty.into() },
+                );
+                ecx.add_goal(normalizes_to_goal);
+                if let Err(NoSolution) = ecx.try_evaluate_added_goals() {
+                    debug!("self type normalization failed");
+                    return vec![];
+                }
+                let normalized_ty = ecx.resolve_vars_if_possible(normalized_ty);
+                debug!(?normalized_ty, "self type normalized");
+                // NOTE: Alternatively we could call `evaluate_goal` here and only
+                // have a `Normalized` candidate. This doesn't work as long as we
+                // use `CandidateSource` in winnowing.
+                let goal = goal.with(tcx, goal.predicate.with_self_ty(tcx, normalized_ty));
+                ecx.assemble_candidates_via_self_ty(goal, num_steps + 1)
+            } else {
+                match ecx.evaluate_added_goals_and_make_canonical_response(Certainty::OVERFLOW) {
+                    Ok(result) => vec![Candidate {
+                        source: CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
+                        result,
+                    }],
+                    Err(NoSolution) => vec![],
+                }
+            }
+        }));
     }
 
     #[instrument(level = "debug", skip_all)]
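An aside, not part of the diff: the control flow introduced by the new `num_steps` parameter is easy to sketch in isolation. The function below peels one layer of normalization per recursive call and, once a small local step limit is exceeded, stops and reports an "overflow" answer instead of recursing further. The names `Outcome`, `assemble`, `remaining_layers`, and the constant in `local_overflow_limit` are illustrative, not the solver's real API.

```rust
// Illustrative stand-in for the solver's candidate assembly outcome.
#[derive(Debug, PartialEq)]
enum Outcome {
    Done(&'static str),
    Overflow,
}

fn local_overflow_limit() -> usize {
    // The real solver derives this from the recursion limit (its ilog2);
    // a small constant keeps the sketch simple.
    4
}

// `remaining_layers` plays the role of an alias type that still needs that
// many rounds of normalization before candidates can be assembled for it.
fn assemble(remaining_layers: usize, num_steps: usize) -> Outcome {
    if remaining_layers == 0 {
        return Outcome::Done("candidates for the normalized self type");
    }
    if num_steps < local_overflow_limit() {
        // Normalize one layer and retry, tracking how many steps were taken.
        assemble(remaining_layers - 1, num_steps + 1)
    } else {
        // Too many normalization steps: give up with an ambiguous overflow
        // response instead of recursing forever.
        Outcome::Overflow
    }
}

fn main() {
    assert_eq!(assemble(3, 0), Outcome::Done("candidates for the normalized self type"));
    assert_eq!(assemble(40, 0), Outcome::Overflow);
}
```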
@@ -533,7 +524,7 @@ fn assemble_non_blanket_impl_candidates<G: GoalKind<'tcx>>(
             ty::Alias(_, _) | ty::Placeholder(..) | ty::Error(_) => (),
 
             // FIXME: These should ideally not exist as a self type. It would be nice for
-            // the builtin auto trait impls of generators should instead directly recurse
+            // the builtin auto trait impls of generators to instead directly recurse
             // into the witness.
             ty::GeneratorWitness(_) | ty::GeneratorWitnessMIR(_, _) => (),
 
@@ -1,20 +1,21 @@
+use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_infer::infer::at::ToTrace;
 use rustc_infer::infer::canonical::CanonicalVarValues;
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use rustc_infer::infer::{
-    DefineOpaqueTypes, InferCtxt, InferOk, LateBoundRegionConversionTime, RegionVariableOrigin,
-    TyCtxtInferExt,
+    DefineOpaqueTypes, InferCtxt, InferOk, LateBoundRegionConversionTime, TyCtxtInferExt,
 };
 use rustc_infer::traits::query::NoSolution;
 use rustc_infer::traits::ObligationCause;
+use rustc_middle::infer::canonical::CanonicalVarInfos;
 use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
 use rustc_middle::traits::solve::inspect;
 use rustc_middle::traits::solve::{
     CanonicalInput, CanonicalResponse, Certainty, IsNormalizesToHack, PredefinedOpaques,
     PredefinedOpaquesData, QueryResult,
 };
-use rustc_middle::traits::DefiningAnchor;
+use rustc_middle::traits::{specialization_graph, DefiningAnchor};
 use rustc_middle::ty::{
     self, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable, TypeVisitable,
     TypeVisitableExt, TypeVisitor,
@@ -24,11 +25,10 @@
 use std::io::Write;
 use std::ops::ControlFlow;
 
-use crate::traits::specialization_graph;
 use crate::traits::vtable::{count_own_vtable_entries, prepare_vtable_segments, VtblSegment};
 
 use super::inspect::ProofTreeBuilder;
-use super::search_graph::{self, OverflowHandler};
+use super::search_graph;
 use super::SolverMode;
 use super::{search_graph::SearchGraph, Goal};
 pub use select::InferCtxtSelectExt;
@@ -55,6 +55,9 @@ pub struct EvalCtxt<'a, 'tcx> {
     /// the job already.
     infcx: &'a InferCtxt<'tcx>,
 
+    /// The variable info for the `var_values`, only used to make an ambiguous response
+    /// with no constraints.
+    variables: CanonicalVarInfos<'tcx>,
     pub(super) var_values: CanonicalVarValues<'tcx>,
 
     predefined_opaques_in_body: PredefinedOpaques<'tcx>,
@@ -171,6 +174,10 @@ pub(super) fn solver_mode(&self) -> SolverMode {
         self.search_graph.solver_mode()
     }
 
+    pub(super) fn local_overflow_limit(&self) -> usize {
+        self.search_graph.local_overflow_limit()
+    }
+
     /// Creates a root evaluation context and search graph. This should only be
     /// used from outside of any evaluation, and other methods should be preferred
     /// over using this manually (such as [`InferCtxtEvalExt::evaluate_root_goal`]).
@@ -184,18 +191,19 @@ fn enter_root<R>(
 
         let mut ecx = EvalCtxt {
             search_graph: &mut search_graph,
-            infcx: infcx,
+            infcx,
+            nested_goals: NestedGoals::new(),
+            inspect: ProofTreeBuilder::new_maybe_root(infcx.tcx, generate_proof_tree),
+
             // Only relevant when canonicalizing the response,
             // which we don't do within this evaluation context.
             predefined_opaques_in_body: infcx
                 .tcx
                 .mk_predefined_opaques_in_body(PredefinedOpaquesData::default()),
-            // Only relevant when canonicalizing the response.
             max_input_universe: ty::UniverseIndex::ROOT,
+            variables: ty::List::empty(),
             var_values: CanonicalVarValues::dummy(),
-            nested_goals: NestedGoals::new(),
             tainted: Ok(()),
-            inspect: ProofTreeBuilder::new_maybe_root(infcx.tcx, generate_proof_tree),
         };
         let result = f(&mut ecx);
 
@@ -245,6 +253,7 @@ fn enter_canonical<R>(
 
         let mut ecx = EvalCtxt {
             infcx,
+            variables: canonical_input.variables,
             var_values,
             predefined_opaques_in_body: input.predefined_opaques_in_body,
             max_input_universe: canonical_input.max_universe,
@@ -300,24 +309,26 @@ fn evaluate_canonical_goal(
         // Deal with overflow, caching, and coinduction.
         //
         // The actual solver logic happens in `ecx.compute_goal`.
-        search_graph.with_new_goal(
-            tcx,
-            canonical_input,
-            goal_evaluation,
-            |search_graph, goal_evaluation| {
-                EvalCtxt::enter_canonical(
-                    tcx,
-                    search_graph,
-                    canonical_input,
-                    goal_evaluation,
-                    |ecx, goal| {
-                        let result = ecx.compute_goal(goal);
-                        ecx.inspect.query_result(result);
-                        result
-                    },
-                )
-            },
-        )
+        ensure_sufficient_stack(|| {
+            search_graph.with_new_goal(
+                tcx,
+                canonical_input,
+                goal_evaluation,
+                |search_graph, goal_evaluation| {
+                    EvalCtxt::enter_canonical(
+                        tcx,
+                        search_graph,
+                        canonical_input,
+                        goal_evaluation,
+                        |ecx, goal| {
+                            let result = ecx.compute_goal(goal);
+                            ecx.inspect.query_result(result);
+                            result
+                        },
+                    )
+                },
+            )
+        })
     }
 
     /// Recursively evaluates `goal`, returning whether any inference vars have
@@ -329,6 +340,7 @@ fn evaluate_goal(
     ) -> Result<(bool, Certainty, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), NoSolution> {
         let (orig_values, canonical_goal) = self.canonicalize_goal(goal);
         let mut goal_evaluation = self.inspect.new_goal_evaluation(goal, is_normalizes_to_hack);
+        let encountered_overflow = self.search_graph.encountered_overflow();
         let canonical_response = EvalCtxt::evaluate_canonical_goal(
             self.tcx(),
             self.search_graph,
@@ -377,6 +389,7 @@ fn evaluate_goal(
             && !self.search_graph.in_cycle()
         {
             debug!("rerunning goal to check result is stable");
+            self.search_graph.reset_encountered_overflow(encountered_overflow);
             let (_orig_values, canonical_goal) = self.canonicalize_goal(goal);
             let new_canonical_response = EvalCtxt::evaluate_canonical_goal(
                 self.tcx(),
@@ -471,101 +484,22 @@ pub(super) fn try_evaluate_added_goals(&mut self) -> Result<Certainty, NoSolutio
         let inspect = self.inspect.new_evaluate_added_goals();
         let inspect = core::mem::replace(&mut self.inspect, inspect);
 
-        let mut goals = core::mem::replace(&mut self.nested_goals, NestedGoals::new());
-        let mut new_goals = NestedGoals::new();
-
-        let response = self.repeat_while_none(
-            |_| Ok(Certainty::OVERFLOW),
-            |this| {
-                this.inspect.evaluate_added_goals_loop_start();
-
-                let mut has_changed = Err(Certainty::Yes);
-
-                if let Some(goal) = goals.normalizes_to_hack_goal.take() {
-                    // Replace the goal with an unconstrained infer var, so the
-                    // RHS does not affect projection candidate assembly.
-                    let unconstrained_rhs = this.next_term_infer_of_kind(goal.predicate.term);
-                    let unconstrained_goal = goal.with(
-                        this.tcx(),
-                        ty::Binder::dummy(ty::ProjectionPredicate {
-                            projection_ty: goal.predicate.projection_ty,
-                            term: unconstrained_rhs,
-                        }),
-                    );
-
-                    let (_, certainty, instantiate_goals) =
-                        match this.evaluate_goal(IsNormalizesToHack::Yes, unconstrained_goal) {
-                            Ok(r) => r,
-                            Err(NoSolution) => return Some(Err(NoSolution)),
-                        };
-                    new_goals.goals.extend(instantiate_goals);
-
-                    // Finally, equate the goal's RHS with the unconstrained var.
-                    // We put the nested goals from this into goals instead of
-                    // next_goals to avoid needing to process the loop one extra
-                    // time if this goal returns something -- I don't think this
-                    // matters in practice, though.
-                    match this.eq_and_get_goals(
-                        goal.param_env,
-                        goal.predicate.term,
-                        unconstrained_rhs,
-                    ) {
-                        Ok(eq_goals) => {
-                            goals.goals.extend(eq_goals);
-                        }
-                        Err(NoSolution) => return Some(Err(NoSolution)),
-                    };
-
-                    // We only look at the `projection_ty` part here rather than
-                    // looking at the "has changed" return from evaluate_goal,
-                    // because we expect the `unconstrained_rhs` part of the predicate
-                    // to have changed -- that means we actually normalized successfully!
-                    if goal.predicate.projection_ty
-                        != this.resolve_vars_if_possible(goal.predicate.projection_ty)
-                    {
-                        has_changed = Ok(())
-                    }
-
-                    match certainty {
-                        Certainty::Yes => {}
-                        Certainty::Maybe(_) => {
-                            // We need to resolve vars here so that we correctly
-                            // deal with `has_changed` in the next iteration.
-                            new_goals.normalizes_to_hack_goal =
-                                Some(this.resolve_vars_if_possible(goal));
-                            has_changed = has_changed.map_err(|c| c.unify_with(certainty));
-                        }
-                    }
-                }
-
-                for goal in goals.goals.drain(..) {
-                    let (changed, certainty, instantiate_goals) =
-                        match this.evaluate_goal(IsNormalizesToHack::No, goal) {
-                            Ok(result) => result,
-                            Err(NoSolution) => return Some(Err(NoSolution)),
-                        };
-                    new_goals.goals.extend(instantiate_goals);
-
-                    if changed {
-                        has_changed = Ok(());
-                    }
-
-                    match certainty {
-                        Certainty::Yes => {}
-                        Certainty::Maybe(_) => {
-                            new_goals.goals.push(goal);
-                            has_changed = has_changed.map_err(|c| c.unify_with(certainty));
-                        }
-                    }
-                }
-
-                core::mem::swap(&mut new_goals, &mut goals);
-                match has_changed {
-                    Ok(()) => None,
-                    Err(certainty) => Some(Ok(certainty)),
-                }
-            },
-        );
-
+        let mut response = Ok(Certainty::OVERFLOW);
+        for _ in 0..self.local_overflow_limit() {
+            // FIXME: This match is a bit ugly, it might be nice to change the inspect
+            // stuff to use a closure instead. which should hopefully simplify this a bit.
+            match self.evaluate_added_goals_step() {
+                Ok(Some(cert)) => {
+                    response = Ok(cert);
+                    break;
+                }
+                Ok(None) => {}
+                Err(NoSolution) => {
+                    response = Err(NoSolution);
+                    break;
+                }
+            }
+        }
+
         self.inspect.eval_added_goals_result(response);
 
@@ -576,9 +510,84 @@ pub(super) fn try_evaluate_added_goals(&mut self) -> Result<Certainty, NoSolutio
         let goal_evaluations = std::mem::replace(&mut self.inspect, inspect);
         self.inspect.added_goals_evaluation(goal_evaluations);
 
-        self.nested_goals = goals;
         response
     }
 
+    /// Iterate over all added goals: returning `Ok(Some(_))` in case we can stop rerunning.
+    ///
+    /// Goals for the next step get directly added the the nested goals of the `EvalCtxt`.
+    fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution> {
+        let tcx = self.tcx();
+        let mut goals = core::mem::replace(&mut self.nested_goals, NestedGoals::new());
+
+        self.inspect.evaluate_added_goals_loop_start();
+        // If this loop did not result in any progress, what's our final certainty.
+        let mut unchanged_certainty = Some(Certainty::Yes);
+        if let Some(goal) = goals.normalizes_to_hack_goal.take() {
+            // Replace the goal with an unconstrained infer var, so the
+            // RHS does not affect projection candidate assembly.
+            let unconstrained_rhs = self.next_term_infer_of_kind(goal.predicate.term);
+            let unconstrained_goal = goal.with(
+                tcx,
+                ty::ProjectionPredicate {
+                    projection_ty: goal.predicate.projection_ty,
+                    term: unconstrained_rhs,
+                },
+            );
+
+            let (_, certainty, instantiate_goals) =
+                self.evaluate_goal(IsNormalizesToHack::Yes, unconstrained_goal)?;
+            self.add_goals(instantiate_goals);
+
+            // Finally, equate the goal's RHS with the unconstrained var.
+            // We put the nested goals from this into goals instead of
+            // next_goals to avoid needing to process the loop one extra
+            // time if this goal returns something -- I don't think this
+            // matters in practice, though.
+            let eq_goals =
+                self.eq_and_get_goals(goal.param_env, goal.predicate.term, unconstrained_rhs)?;
+            goals.goals.extend(eq_goals);
+
+            // We only look at the `projection_ty` part here rather than
+            // looking at the "has changed" return from evaluate_goal,
+            // because we expect the `unconstrained_rhs` part of the predicate
+            // to have changed -- that means we actually normalized successfully!
+            if goal.predicate.projection_ty
+                != self.resolve_vars_if_possible(goal.predicate.projection_ty)
+            {
+                unchanged_certainty = None;
+            }
+
+            match certainty {
+                Certainty::Yes => {}
+                Certainty::Maybe(_) => {
+                    // We need to resolve vars here so that we correctly
+                    // deal with `has_changed` in the next iteration.
+                    self.set_normalizes_to_hack_goal(self.resolve_vars_if_possible(goal));
+                    unchanged_certainty = unchanged_certainty.map(|c| c.unify_with(certainty));
+                }
+            }
+        }
+
+        for goal in goals.goals.drain(..) {
+            let (has_changed, certainty, instantiate_goals) =
+                self.evaluate_goal(IsNormalizesToHack::No, goal)?;
+            self.add_goals(instantiate_goals);
+            if has_changed {
+                unchanged_certainty = None;
+            }
+
+            match certainty {
+                Certainty::Yes => {}
+                Certainty::Maybe(_) => {
+                    self.add_goal(goal);
+                    unchanged_certainty = unchanged_certainty.map(|c| c.unify_with(certainty));
+                }
+            }
+        }
+
+        Ok(unchanged_certainty)
+    }
 }
 
 impl<'tcx> EvalCtxt<'_, 'tcx> {
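An aside on the shape of the new loop: `try_evaluate_added_goals` now just reruns a single step function until that step reports it can stop, and gives up with `Certainty::OVERFLOW` if no fixpoint is reached within the local overflow limit. The sketch below mirrors that structure with hypothetical names (`Solver`, `pending`, a unit error type); it is not the solver's real API.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Certainty {
    Yes,
    Overflow,
}

struct Solver {
    // Goals still left to prove, each needing `n` more rounds of work.
    pending: Vec<u32>,
    local_overflow_limit: usize,
}

impl Solver {
    /// One evaluation step. `Ok(Some(_))` means we can stop rerunning,
    /// `Ok(None)` means progress was made and another iteration is needed.
    fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, ()> {
        for goal in &mut self.pending {
            *goal -= 1; // every goal makes one unit of progress per step
        }
        self.pending.retain(|&goal| goal > 0);
        if self.pending.is_empty() { Ok(Some(Certainty::Yes)) } else { Ok(None) }
    }

    /// Rerun the step function until it reaches a fixpoint, giving up with
    /// `Certainty::Overflow` after `local_overflow_limit` iterations.
    fn try_evaluate_added_goals(&mut self) -> Result<Certainty, ()> {
        let mut response = Ok(Certainty::Overflow);
        for _ in 0..self.local_overflow_limit {
            match self.evaluate_added_goals_step() {
                Ok(Some(certainty)) => {
                    response = Ok(certainty);
                    break;
                }
                Ok(None) => {}
                Err(()) => {
                    response = Err(());
                    break;
                }
            }
        }
        response
    }
}

fn main() {
    let mut quick = Solver { pending: vec![1, 2, 3], local_overflow_limit: 7 };
    assert_eq!(quick.try_evaluate_added_goals(), Ok(Certainty::Yes));

    let mut slow = Solver { pending: vec![100], local_overflow_limit: 7 };
    assert_eq!(slow.try_evaluate_added_goals(), Ok(Certainty::Overflow));
}
```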
@@ -593,10 +602,6 @@ pub(super) fn next_ty_infer(&self) -> Ty<'tcx> {
         })
     }
 
-    pub(super) fn next_region_infer(&self) -> ty::Region<'tcx> {
-        self.infcx.next_region_var(RegionVariableOrigin::MiscVariable(DUMMY_SP))
-    }
-
     pub(super) fn next_const_infer(&self, ty: Ty<'tcx>) -> ty::Const<'tcx> {
         self.infcx.next_const_var(
             ty,
@@ -10,7 +10,7 @@
 //! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
 use super::{CanonicalInput, Certainty, EvalCtxt, Goal};
 use crate::solve::canonicalize::{CanonicalizeMode, Canonicalizer};
-use crate::solve::{CanonicalResponse, QueryResult, Response};
+use crate::solve::{response_no_constraints_raw, CanonicalResponse, QueryResult, Response};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_index::IndexVec;
 use rustc_infer::infer::canonical::query_response::make_query_region_constraints;
@@ -124,29 +124,11 @@ pub(in crate::solve) fn make_ambiguous_response_no_constraints(
         &self,
         maybe_cause: MaybeCause,
     ) -> CanonicalResponse<'tcx> {
-        let unconstrained_response = Response {
-            var_values: CanonicalVarValues {
-                var_values: self.tcx().mk_args_from_iter(self.var_values.var_values.iter().map(
-                    |arg| -> ty::GenericArg<'tcx> {
-                        match arg.unpack() {
-                            GenericArgKind::Lifetime(_) => self.next_region_infer().into(),
-                            GenericArgKind::Type(_) => self.next_ty_infer().into(),
-                            GenericArgKind::Const(ct) => self.next_const_infer(ct.ty()).into(),
-                        }
-                    },
-                )),
-            },
-            external_constraints: self
-                .tcx()
-                .mk_external_constraints(ExternalConstraintsData::default()),
-            certainty: Certainty::Maybe(maybe_cause),
-        };
-
-        Canonicalizer::canonicalize(
-            self.infcx,
-            CanonicalizeMode::Response { max_input_universe: self.max_input_universe },
-            &mut Default::default(),
-            unconstrained_response,
+        response_no_constraints_raw(
+            self.tcx(),
+            self.max_input_universe,
+            self.variables,
+            Certainty::Maybe(maybe_cause),
         )
     }
 
@@ -17,6 +17,7 @@ pub(in crate::solve) fn enter(self, f: impl FnOnce(&mut EvalCtxt<'_, 'tcx>) -> T
 
         let mut nested_ecx = EvalCtxt {
             infcx: outer_ecx.infcx,
+            variables: outer_ecx.variables,
             var_values: outer_ecx.var_values,
             predefined_opaques_in_body: outer_ecx.predefined_opaques_in_body,
             max_input_universe: outer_ecx.max_input_universe,
@@ -14,7 +14,6 @@
 use crate::solve::assembly::{Candidate, CandidateSource};
 use crate::solve::eval_ctxt::{EvalCtxt, GenerateProofTree};
 use crate::solve::inspect::ProofTreeBuilder;
-use crate::solve::search_graph::OverflowHandler;
 use crate::traits::StructurallyNormalizeExt;
 use crate::traits::TraitEngineExt;
 
@@ -143,7 +142,7 @@ fn compute_canonical_trait_candidates(
         // the cycle anyways one step later.
         EvalCtxt::enter_canonical(
             self.tcx(),
-            self.search_graph(),
+            self.search_graph,
             canonical_input,
             // FIXME: This is wrong, idk if we even want to track stuff here.
             &mut ProofTreeBuilder::new_noop(),
@@ -269,7 +268,7 @@ fn rematch_unsize<'tcx>(
         infcx.tcx,
         ObligationCause::dummy(),
         goal.param_env,
-        ty::Binder::dummy(ty::OutlivesPredicate(a_ty, region)),
+        ty::OutlivesPredicate(a_ty, region),
     ));
 
     Ok(Some(ImplSource::Builtin(source, nested)))
@@ -17,10 +17,11 @@
 use rustc_hir::def_id::DefId;
 use rustc_infer::infer::canonical::{Canonical, CanonicalVarValues};
 use rustc_infer::traits::query::NoSolution;
+use rustc_middle::infer::canonical::CanonicalVarInfos;
 use rustc_middle::traits::solve::{
     CanonicalResponse, Certainty, ExternalConstraintsData, Goal, QueryResult, Response,
 };
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, Ty, TyCtxt, UniverseIndex};
 use rustc_middle::ty::{
     CoercePredicate, RegionOutlivesPredicate, SubtypePredicate, TypeOutlivesPredicate,
 };
@@ -284,20 +285,21 @@ fn flounder(&mut self, responses: &[CanonicalResponse<'tcx>]) -> QueryResult<'tc
     }
 }
 
-pub(super) fn response_no_constraints<'tcx>(
+fn response_no_constraints_raw<'tcx>(
     tcx: TyCtxt<'tcx>,
-    goal: Canonical<'tcx, impl Sized>,
+    max_universe: UniverseIndex,
+    variables: CanonicalVarInfos<'tcx>,
     certainty: Certainty,
-) -> QueryResult<'tcx> {
-    Ok(Canonical {
-        max_universe: goal.max_universe,
-        variables: goal.variables,
+) -> CanonicalResponse<'tcx> {
+    Canonical {
+        max_universe,
+        variables,
         value: Response {
-            var_values: CanonicalVarValues::make_identity(tcx, goal.variables),
+            var_values: CanonicalVarValues::make_identity(tcx, variables),
             // FIXME: maybe we should store the "no response" version in tcx, like
             // we do for tcx.types and stuff.
             external_constraints: tcx.mk_external_constraints(ExternalConstraintsData::default()),
             certainty,
         },
-    })
+    }
 }
@@ -75,10 +75,7 @@ fn normalize_alias_ty(
             tcx,
             self.at.cause.clone(),
             self.at.param_env,
-            ty::Binder::dummy(ty::ProjectionPredicate {
-                projection_ty: alias,
-                term: new_infer_ty.into(),
-            }),
+            ty::ProjectionPredicate { projection_ty: alias, term: new_infer_ty.into() },
         );
 
         // Do not emit an error if normalization is known to fail but instead
@@ -131,10 +128,10 @@ fn normalize_unevaluated_const(
             tcx,
             self.at.cause.clone(),
             self.at.param_env,
-            ty::Binder::dummy(ty::ProjectionPredicate {
+            ty::ProjectionPredicate {
                 projection_ty: tcx.mk_alias_ty(uv.def, uv.args),
                 term: new_infer_ct.into(),
-            }),
+            },
         );
 
         let result = if infcx.predicate_may_hold(&obligation) {
@@ -393,10 +393,7 @@ fn consider_builtin_pointee_candidate(
                         None => tcx.types.unit,
                         Some(field_def) => {
                             let self_ty = field_def.ty(tcx, args);
-                            ecx.add_goal(goal.with(
-                                tcx,
-                                ty::Binder::dummy(goal.predicate.with_self_ty(tcx, self_ty)),
-                            ));
+                            ecx.add_goal(goal.with(tcx, goal.predicate.with_self_ty(tcx, self_ty)));
                             return ecx
                                 .evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
                         }
@@ -406,10 +403,7 @@ fn consider_builtin_pointee_candidate(
                     ty::Tuple(elements) => match elements.last() {
                         None => tcx.types.unit,
                         Some(&self_ty) => {
-                            ecx.add_goal(goal.with(
-                                tcx,
-                                ty::Binder::dummy(goal.predicate.with_self_ty(tcx, self_ty)),
-                            ));
+                            ecx.add_goal(goal.with(tcx, goal.predicate.with_self_ty(tcx, self_ty)));
                             return ecx
                                 .evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
                         }
@@ -450,10 +444,10 @@ fn consider_builtin_future_candidate(
         Self::consider_implied_clause(
             ecx,
             goal,
-            ty::Binder::dummy(ty::ProjectionPredicate {
+            ty::ProjectionPredicate {
                 projection_ty: ecx.tcx().mk_alias_ty(goal.predicate.def_id(), [self_ty]),
                 term,
-            })
+            }
             .to_predicate(tcx),
             // Technically, we need to check that the future type is Sized,
             // but that's already proven by the generator being WF.
@@ -490,12 +484,12 @@ fn consider_builtin_generator_candidate(
         Self::consider_implied_clause(
             ecx,
             goal,
-            ty::Binder::dummy(ty::ProjectionPredicate {
+            ty::ProjectionPredicate {
                 projection_ty: ecx
                     .tcx()
                     .mk_alias_ty(goal.predicate.def_id(), [self_ty, generator.resume_ty()]),
                 term,
-            })
+            }
             .to_predicate(tcx),
             // Technically, we need to check that the future type is Sized,
             // but that's already proven by the generator being WF.
@@ -1,37 +1,51 @@
 mod cache;
-mod overflow;
-
-pub(super) use overflow::OverflowHandler;
-use rustc_middle::traits::solve::inspect::CacheHit;
 
 use self::cache::ProvisionalEntry;
-use cache::ProvisionalCache;
-use overflow::OverflowData;
-use rustc_index::IndexVec;
-use rustc_middle::dep_graph::DepKind;
-use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
-use rustc_middle::ty::TyCtxt;
-use std::{collections::hash_map::Entry, mem};
-
 use super::inspect::ProofTreeBuilder;
 use super::SolverMode;
+use cache::ProvisionalCache;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::Idx;
+use rustc_index::IndexVec;
+use rustc_middle::dep_graph::DepKind;
+use rustc_middle::traits::solve::inspect::CacheHit;
+use rustc_middle::traits::solve::CacheData;
+use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::Limit;
+use std::{collections::hash_map::Entry, mem};
 
 rustc_index::newtype_index! {
     pub struct StackDepth {}
 }
 
-struct StackElem<'tcx> {
+#[derive(Debug)]
+struct StackEntry<'tcx> {
     input: CanonicalInput<'tcx>,
+    available_depth: Limit,
+    // The maximum depth reached by this stack entry, only up-to date
+    // for the top of the stack and lazily updated for the rest.
+    reached_depth: StackDepth,
+    encountered_overflow: bool,
     has_been_used: bool,
+
+    /// We put only the root goal of a coinductive cycle into the global cache.
+    ///
+    /// If we were to use that result when later trying to prove another cycle
+    /// participant, we can end up with unstable query results.
+    ///
+    /// See tests/ui/new-solver/coinduction/incompleteness-unstable-result.rs for
+    /// an example of where this is needed.
+    cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
 }
 
 pub(super) struct SearchGraph<'tcx> {
     mode: SolverMode,
+    local_overflow_limit: usize,
     /// The stack of goals currently being computed.
     ///
     /// An element is *deeper* in the stack if its index is *lower*.
-    stack: IndexVec<StackDepth, StackElem<'tcx>>,
-    overflow_data: OverflowData,
+    stack: IndexVec<StackDepth, StackEntry<'tcx>>,
     provisional_cache: ProvisionalCache<'tcx>,
 }
 
@@ -39,8 +53,8 @@ impl<'tcx> SearchGraph<'tcx> {
     pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> {
         Self {
            mode,
+            local_overflow_limit: tcx.recursion_limit().0.ilog2() as usize,
             stack: Default::default(),
-            overflow_data: OverflowData::new(tcx),
             provisional_cache: ProvisionalCache::empty(),
         }
     }
@@ -49,15 +63,38 @@ pub(super) fn solver_mode(&self) -> SolverMode {
         self.mode
     }
 
-    /// We do not use the global cache during coherence.
+    pub(super) fn local_overflow_limit(&self) -> usize {
+        self.local_overflow_limit
+    }
+
+    /// Update the stack and reached depths on cache hits.
+    #[instrument(level = "debug", skip(self))]
+    fn on_cache_hit(&mut self, additional_depth: usize, encountered_overflow: bool) {
+        let reached_depth = self.stack.next_index().plus(additional_depth);
+        if let Some(last) = self.stack.raw.last_mut() {
+            last.reached_depth = last.reached_depth.max(reached_depth);
+            last.encountered_overflow |= encountered_overflow;
+        }
+    }
+
+    /// Pops the highest goal from the stack, lazily updating the
+    /// the next goal in the stack.
     ///
+    /// Directly popping from the stack instead of using this method
+    /// would cause us to not track overflow and recursion depth correctly.
+    fn pop_stack(&mut self) -> StackEntry<'tcx> {
+        let elem = self.stack.pop().unwrap();
+        if let Some(last) = self.stack.raw.last_mut() {
+            last.reached_depth = last.reached_depth.max(elem.reached_depth);
+            last.encountered_overflow |= elem.encountered_overflow;
+        }
+        elem
+    }
+
     /// The trait solver behavior is different for coherence
-    /// so we would have to add the solver mode to the cache key.
-    /// This is probably not worth it as trait solving during
-    /// coherence tends to already be incredibly fast.
-    ///
-    /// We could add another global cache for coherence instead,
-    /// but that's effort so let's only do it if necessary.
+    /// so we use a separate cache. Alternatively we could use
+    /// a single cache and share it between coherence and ordinary
+    /// trait solving.
     pub(super) fn global_cache(&self, tcx: TyCtxt<'tcx>) -> &'tcx EvaluationCache<'tcx> {
         match self.mode {
             SolverMode::Normal => &tcx.new_solver_evaluation_cache,
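Another aside, not part of the diff: the numbers involved in the new overflow handling are easy to work through. Assuming the default `recursion_limit` of 128, `local_overflow_limit` is `128.ilog2() = 7`, and the depth budget handed to nested goals normally shrinks by one per level but drops to a quarter once overflow has been hit. The helper below mirrors the rule from `allowed_depth_for_nested` using plain integers; it is an illustration, not the real signature.

```rust
/// Mirrors the budget rule from `allowed_depth_for_nested`: `None` means the
/// current goal itself has to report overflow.
fn allowed_depth_for_nested(
    parent_budget: Option<usize>,
    parent_overflowed: bool,
    recursion_limit: usize,
) -> Option<usize> {
    match parent_budget {
        // Root goal: start from the full recursion limit.
        None => Some(recursion_limit),
        Some(0) => None,
        Some(available) if parent_overflowed => Some(available / 4),
        Some(available) => Some(available - 1),
    }
}

fn main() {
    let recursion_limit = 128_usize;
    // The per-`EvalCtxt` rerun limit is logarithmic in the recursion limit.
    let local_overflow_limit = recursion_limit.ilog2() as usize;
    assert_eq!(local_overflow_limit, 7);

    // Normal descent: 128, 127, 126, ...
    assert_eq!(allowed_depth_for_nested(None, false, recursion_limit), Some(128));
    assert_eq!(allowed_depth_for_nested(Some(128), false, recursion_limit), Some(127));

    // After overflow the budget collapses quickly: 128, 32, 8, 2, 0, then None.
    let mut budget = Some(128);
    let mut seen = vec![];
    while let Some(b) = budget {
        seen.push(b);
        budget = allowed_depth_for_nested(Some(b), true, recursion_limit);
    }
    assert_eq!(seen, vec![128, 32, 8, 2, 0]);
}
```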
@@ -87,36 +124,107 @@ pub(super) fn in_cycle(&self) -> bool {
         }
     }
 
-    /// Tries putting the new goal on the stack, returning an error if it is already cached.
+    /// Fetches whether the current goal encountered overflow.
     ///
-    /// This correctly updates the provisional cache if there is a cycle.
-    #[instrument(level = "debug", skip(self, tcx, inspect), ret)]
-    fn try_push_stack(
+    /// This should only be used for the check in `evaluate_goal`.
+    pub(super) fn encountered_overflow(&self) -> bool {
+        if let Some(last) = self.stack.raw.last() { last.encountered_overflow } else { false }
+    }
+
+    /// Resets `encountered_overflow` of the current goal.
+    ///
+    /// This should only be used for the check in `evaluate_goal`.
+    pub(super) fn reset_encountered_overflow(&mut self, encountered_overflow: bool) {
+        if encountered_overflow {
+            self.stack.raw.last_mut().unwrap().encountered_overflow = true;
+        }
+    }
+
+    /// Returns the remaining depth allowed for nested goals.
+    ///
+    /// This is generally simply one less than the current depth.
+    /// However, if we encountered overflow, we significantly reduce
+    /// the remaining depth of all nested goals to prevent hangs
+    /// in case there is exponential blowup.
+    fn allowed_depth_for_nested(
+        tcx: TyCtxt<'tcx>,
+        stack: &IndexVec<StackDepth, StackEntry<'tcx>>,
+    ) -> Option<Limit> {
+        if let Some(last) = stack.raw.last() {
+            if last.available_depth.0 == 0 {
+                return None;
+            }
+
+            Some(if last.encountered_overflow {
+                Limit(last.available_depth.0 / 4)
+            } else {
+                Limit(last.available_depth.0 - 1)
+            })
+        } else {
+            Some(tcx.recursion_limit())
+        }
+    }
+
+    /// Probably the most involved method of the whole solver.
+    ///
+    /// Given some goal which is proven via the `prove_goal` closure, this
+    /// handles caching, overflow, and coinductive cycles.
+    pub(super) fn with_new_goal(
         &mut self,
         tcx: TyCtxt<'tcx>,
         input: CanonicalInput<'tcx>,
         inspect: &mut ProofTreeBuilder<'tcx>,
-    ) -> Result<(), QueryResult<'tcx>> {
-        // Look at the provisional cache to check for cycles.
+        mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
+    ) -> QueryResult<'tcx> {
+        // Check for overflow.
+        let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
+            if let Some(last) = self.stack.raw.last_mut() {
+                last.encountered_overflow = true;
+            }
+            return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
+        };
+
+        // Try to fetch the goal from the global cache.
+        if inspect.use_global_cache() {
+            if let Some(CacheData { result, reached_depth, encountered_overflow }) =
+                self.global_cache(tcx).get(
+                    tcx,
+                    input,
+                    |cycle_participants| {
+                        self.stack.iter().any(|entry| cycle_participants.contains(&entry.input))
+                    },
+                    available_depth,
+                )
+            {
+                self.on_cache_hit(reached_depth, encountered_overflow);
+                return result;
+            }
+        }
+
+        // Look at the provisional cache to detect cycles.
         let cache = &mut self.provisional_cache;
         match cache.lookup_table.entry(input) {
-            // No entry, simply push this goal on the stack after dealing with overflow.
+            // No entry, we push this goal on the stack and try to prove it.
             Entry::Vacant(v) => {
-                if self.overflow_data.has_overflow(self.stack.len()) {
-                    return Err(self.deal_with_overflow(tcx, input));
-                }
-
-                let depth = self.stack.push(StackElem { input, has_been_used: false });
-                let response = super::response_no_constraints(tcx, input, Certainty::Yes);
+                let depth = self.stack.next_index();
+                let entry = StackEntry {
+                    input,
+                    available_depth,
+                    reached_depth: depth,
+                    encountered_overflow: false,
+                    has_been_used: false,
+                    cycle_participants: Default::default(),
+                };
+                assert_eq!(self.stack.push(entry), depth);
+                let response = Self::response_no_constraints(tcx, input, Certainty::Yes);
                 let entry_index = cache.entries.push(ProvisionalEntry { response, depth, input });
                 v.insert(entry_index);
-                Ok(())
             }
             // We have a nested goal which relies on a goal `root` deeper in the stack.
             //
-            // We first store that we may have to rerun `evaluate_goal` for `root` in case the
-            // provisional response is not equal to the final response. We also update the depth
-            // of all goals which recursively depend on our current goal to depend on `root`
+            // We first store that we may have to reprove `root` in case the provisional
+            // response is not equal to the final response. We also update the depth of all
+            // goals which recursively depend on our current goal to depend on `root`
             // instead.
             //
             // Finally we can return either the provisional response for that goal if we have a
inspect.cache_hit(CacheHit::Provisional);

let entry_index = *entry_index.get();

let stack_depth = cache.depth(entry_index);
debug!("encountered cycle with depth {stack_depth:?}");

cache.add_dependency_of_leaf_on(entry_index);
let mut iter = self.stack.iter_mut();
let root = iter.nth(stack_depth.as_usize()).unwrap();
for e in iter {
root.cycle_participants.insert(e.input);
}

self.stack[stack_depth].has_been_used = true;
// NOTE: The goals on the stack aren't the only goals involved in this cycle.
// We can also depend on goals which aren't part of the stack but coinductively
// depend on the stack themselves. We already checked whether all the goals
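The cycle-participant bookkeeping above can be read in isolation with a small self-contained sketch. `StackEntry`, `mark_cycle`, and the string goals below are toy stand-ins, not the compiler's types; only the iterator pattern (`iter_mut` + `nth` to find the root, then inserting every later entry) mirrors the diff.

use std::collections::HashSet;

struct StackEntry {
    input: &'static str,
    cycle_participants: HashSet<&'static str>,
}

// When a goal already on the stack is hit again at `stack_depth`, every entry
// above that root records itself as a participant of the root's cycle.
fn mark_cycle(stack: &mut [StackEntry], stack_depth: usize) {
    let mut iter = stack.iter_mut();
    let root = iter.nth(stack_depth).unwrap();
    for e in iter {
        root.cycle_participants.insert(e.input);
    }
}

fn main() {
    let mut stack: Vec<StackEntry> = ["A", "B", "C"]
        .into_iter()
        .map(|input| StackEntry { input, cycle_participants: HashSet::new() })
        .collect();
    // Hitting `A` again while proving `C` makes `B` and `C` participants of `A`'s cycle.
    mark_cycle(&mut stack, 0);
    assert_eq!(stack[0].cycle_participants, HashSet::from(["B", "C"]));
}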
@ -142,145 +253,111 @@ fn try_push_stack(
.iter()
.all(|g| g.input.value.goal.predicate.is_coinductive(tcx))
{
Err(cache.provisional_result(entry_index))
// If we're in a coinductive cycle, we have to retry proving the current goal
// until we reach a fixpoint.
self.stack[stack_depth].has_been_used = true;
return cache.provisional_result(entry_index);
} else {
Err(super::response_no_constraints(tcx, input, Certainty::OVERFLOW))
return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
}
}
}
}

/// We cannot simply store the result of [super::EvalCtxt::compute_goal] as we have to deal with
/// coinductive cycles.
///
/// When we encounter a coinductive cycle, we have to prove the final result of that cycle
/// while we are still computing that result. Because of this we continuously recompute the
/// cycle until the result of the previous iteration is equal to the final result, at which
/// point we are done.
///
/// This function returns `true` if we were able to finalize the goal and `false` if it has
/// updated the provisional cache and we have to recompute the current goal.
///
/// FIXME: Refer to the rustc-dev-guide entry once it exists.
#[instrument(level = "debug", skip(self, actual_input), ret)]
fn try_finalize_goal(
&mut self,
actual_input: CanonicalInput<'tcx>,
response: QueryResult<'tcx>,
) -> bool {
let stack_elem = self.stack.pop().unwrap();
let StackElem { input, has_been_used } = stack_elem;
assert_eq!(input, actual_input);

// This is for global caching, so we properly track query dependencies.
// Everything that affects the `result` should be performed within this
// `with_anon_task` closure.
let ((final_entry, result), dep_node) =
tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
// When we encounter a coinductive cycle, we have to fetch the
// result of that cycle while we are still computing it. Because
// of this we continuously recompute the cycle until the result
// of the previous iteration is equal to the final result, at which
// point we are done.
for _ in 0..self.local_overflow_limit() {
let response = prove_goal(self, inspect);

// Check whether the current goal is the root of a cycle and whether
// we have to rerun because its provisional result differed from the
// final result.
//
// Also update the response for this goal stored in the provisional
// cache.
let stack_entry = self.pop_stack();
debug_assert_eq!(stack_entry.input, input);
let cache = &mut self.provisional_cache;
let provisional_entry_index =
*cache.lookup_table.get(&stack_entry.input).unwrap();
let provisional_entry = &mut cache.entries[provisional_entry_index];
let prev_response = mem::replace(&mut provisional_entry.response, response);
if stack_entry.has_been_used && prev_response != response {
// If so, remove all entries whose result depends on this goal
// from the provisional cache...
//
// That's not completely correct, as a nested goal can also
// depend on a goal which is lower in the stack so it doesn't
// actually depend on the current goal. This should be fairly
// rare and is hopefully not relevant for performance.
#[allow(rustc::potential_query_instability)]
cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
cache.entries.truncate(provisional_entry_index.index() + 1);

// ...and finally push our goal back on the stack and reevaluate it.
self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
} else {
return (stack_entry, response);
}
}

debug!("canonical cycle overflow");
let current_entry = self.pop_stack();
let result = Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
(current_entry, result)
});

// We're now done with this goal. In case this goal is involved in a larger cycle
// do not remove it from the provisional cache and do not add it to the global
// cache.
//
// It is not possible for any nested goal to depend on something deeper on the
// stack, as this would have also updated the depth of the current goal.
let cache = &mut self.provisional_cache;
let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
let provisional_entry = &mut cache.entries[provisional_entry_index];
// We eagerly update the response in the cache here. If we have to reevaluate
// this goal we use the new response when hitting a cycle, and we definitely
// want to access the final response whenever we look at the cache.
let prev_response = mem::replace(&mut provisional_entry.response, response);

// Was the current goal the root of a cycle and was the provisional response
// different from the final one.
if has_been_used && prev_response != response {
// If so, remove all entries whose result depends on this goal
// from the provisional cache...
//
// That's not completely correct, as a nested goal can also
// depend on a goal which is lower in the stack so it doesn't
// actually depend on the current goal. This should be fairly
// rare and is hopefully not relevant for performance.
#[allow(rustc::potential_query_instability)]
cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
cache.entries.truncate(provisional_entry_index.index() + 1);

// ...and finally push our goal back on the stack and reevaluate it.
self.stack.push(StackElem { input, has_been_used: false });
false
} else {
true
}
}

pub(super) fn with_new_goal(
&mut self,
tcx: TyCtxt<'tcx>,
canonical_input: CanonicalInput<'tcx>,
inspect: &mut ProofTreeBuilder<'tcx>,
mut loop_body: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
) -> QueryResult<'tcx> {
if inspect.use_global_cache() {
if let Some(result) = self.global_cache(tcx).get(&canonical_input, tcx) {
debug!(?canonical_input, ?result, "cache hit");
inspect.cache_hit(CacheHit::Global);
return result;
}
}

match self.try_push_stack(tcx, canonical_input, inspect) {
Ok(()) => {}
// Our goal is already on the stack, eager return.
Err(response) => return response,
}

// This is for global caching, so we properly track query dependencies.
// Everything that affects the `Result` should be performed within this
// `with_anon_task` closure.
let (result, dep_node) = tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
self.repeat_while_none(
|this| {
let result = this.deal_with_overflow(tcx, canonical_input);
let _ = this.stack.pop().unwrap();
result
},
|this| {
let result = loop_body(this, inspect);
this.try_finalize_goal(canonical_input, result).then(|| result)
},
)
});

let cache = &mut self.provisional_cache;
let provisional_entry_index = *cache.lookup_table.get(&canonical_input).unwrap();
let provisional_entry = &mut cache.entries[provisional_entry_index];
let depth = provisional_entry.depth;

// If not, we're done with this goal.
//
// Check whether that this goal doesn't depend on a goal deeper on the stack
// and if so, move it to the global cache.
//
// Note that if any nested goal were to depend on something deeper on the stack,
// this would have also updated the depth of the current goal.
if depth == self.stack.next_index() {
// If the current goal is the head of a cycle, we drop all other
// cycle participants without moving them to the global cache.
let other_cycle_participants = provisional_entry_index.index() + 1;
for (i, entry) in cache.entries.drain_enumerated(other_cycle_participants..) {
for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
let actual_index = cache.lookup_table.remove(&entry.input);
debug_assert_eq!(Some(i), actual_index);
debug_assert!(entry.depth == depth);
}

let current_goal = cache.entries.pop().unwrap();
let actual_index = cache.lookup_table.remove(&current_goal.input);
debug_assert_eq!(Some(provisional_entry_index), actual_index);
debug_assert!(current_goal.depth == depth);

// We move the root goal to the global cache if we either did not hit an overflow or if it's
// the root goal as that will now always hit the same overflow limit.
//
// NOTE: We cannot move any non-root goals to the global cache. When replaying the root goal's
// dependencies, our non-root goal may no longer appear as child of the root goal.
//
// See https://github.com/rust-lang/rust/pull/108071 for some additional context.
let can_cache = inspect.use_global_cache()
&& (!self.overflow_data.did_overflow() || self.stack.is_empty());
if can_cache {
self.global_cache(tcx).insert(current_goal.input, dep_node, current_goal.response)
}

// When encountering a cycle, both inductive and coinductive, we only
// move the root into the global cache. We also store all other cycle
// participants involved.
//
// We disable the global cache entry of the root goal if a cycle
// participant is on the stack. This is necessary to prevent unstable
// results. See the comment of `StackEntry::cycle_participants` for
// more details.
let reached_depth = final_entry.reached_depth.as_usize() - self.stack.len();
self.global_cache(tcx).insert(
input,
reached_depth,
final_entry.encountered_overflow,
final_entry.cycle_participants,
dep_node,
result,
)
}

result
}

fn response_no_constraints(
tcx: TyCtxt<'tcx>,
goal: CanonicalInput<'tcx>,
certainty: Certainty,
) -> QueryResult<'tcx> {
Ok(super::response_no_constraints_raw(tcx, goal.max_universe, goal.variables, certainty))
}
}
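The fixpoint loop inside the `with_anon_task` closure above can be summarized with a minimal standalone sketch. The names here (`prove_with_fixpoint`, a numeric "response", the iteration limit) are made up for illustration and are not the solver's types; only the shape of the loop (reprove until the provisional result stops changing, give up after a bounded number of iterations) follows the diff.

// Reprove `goal` until the result matches the provisional result used for
// cycle hits, or the iteration budget runs out (the overflow fallback above).
fn prove_with_fixpoint<G: Copy, R: PartialEq + Copy>(
    goal: G,
    mut provisional: R,
    limit: usize,
    mut prove: impl FnMut(G, R) -> R,
) -> Option<R> {
    for _ in 0..limit {
        let response = prove(goal, provisional);
        if response == provisional {
            // Fixpoint reached: the provisional result equals the final one.
            return Some(response);
        }
        provisional = response;
    }
    None
}

fn main() {
    // Toy "goal": repeated halving converges to 0, so the fixpoint is reached.
    let result = prove_with_fixpoint((), 64u32, 8, |(), prev| prev / 2);
    assert_eq!(result, Some(0));
}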
@ -1,120 +0,0 @@
use rustc_infer::infer::canonical::Canonical;
use rustc_infer::traits::query::NoSolution;
use rustc_middle::traits::solve::{Certainty, QueryResult};
use rustc_middle::ty::TyCtxt;
use rustc_session::Limit;

use super::SearchGraph;
use crate::solve::{response_no_constraints, EvalCtxt};

/// When detecting a solver overflow, we return ambiguity. Overflow can be
/// *hidden* by either a fatal error in an **AND** or a trivial success in an **OR**.
///
/// This is in issue in case of exponential blowup, e.g. if each goal on the stack
/// has multiple nested (overflowing) candidates. To deal with this, we reduce the limit
/// used by the solver when hitting the default limit for the first time.
///
/// FIXME: Get tests where always using the `default_limit` results in a hang and refer
/// to them here. We can also improve the overflow strategy if necessary.
pub(super) struct OverflowData {
default_limit: Limit,
current_limit: Limit,
/// When proving an **AND** we have to repeatedly iterate over the yet unproven goals.
///
/// Because of this each iteration also increases the depth in addition to the stack
/// depth.
additional_depth: usize,
}

impl OverflowData {
pub(super) fn new(tcx: TyCtxt<'_>) -> OverflowData {
let default_limit = tcx.recursion_limit();
OverflowData { default_limit, current_limit: default_limit, additional_depth: 0 }
}

#[inline]
pub(super) fn did_overflow(&self) -> bool {
self.default_limit.0 != self.current_limit.0
}

#[inline]
pub(super) fn has_overflow(&self, depth: usize) -> bool {
!self.current_limit.value_within_limit(depth + self.additional_depth)
}

/// Updating the current limit when hitting overflow.
fn deal_with_overflow(&mut self) {
// When first hitting overflow we reduce the overflow limit
// for all future goals to prevent hangs if there's an exponential
// blowup.
self.current_limit.0 = self.default_limit.0 / 8;
}
}

pub(in crate::solve) trait OverflowHandler<'tcx> {
fn search_graph(&mut self) -> &mut SearchGraph<'tcx>;

fn repeat_while_none<T>(
&mut self,
on_overflow: impl FnOnce(&mut Self) -> Result<T, NoSolution>,
mut loop_body: impl FnMut(&mut Self) -> Option<Result<T, NoSolution>>,
) -> Result<T, NoSolution> {
let start_depth = self.search_graph().overflow_data.additional_depth;
let depth = self.search_graph().stack.len();
while !self.search_graph().overflow_data.has_overflow(depth) {
if let Some(result) = loop_body(self) {
self.search_graph().overflow_data.additional_depth = start_depth;
return result;
}

self.search_graph().overflow_data.additional_depth += 1;
}
self.search_graph().overflow_data.additional_depth = start_depth;
self.search_graph().overflow_data.deal_with_overflow();
on_overflow(self)
}

// Increment the `additional_depth` by one and evaluate `body`, or `on_overflow`
// if the depth is overflown.
fn with_incremented_depth<T>(
&mut self,
on_overflow: impl FnOnce(&mut Self) -> T,
body: impl FnOnce(&mut Self) -> T,
) -> T {
let depth = self.search_graph().stack.len();
self.search_graph().overflow_data.additional_depth += 1;

let result = if self.search_graph().overflow_data.has_overflow(depth) {
self.search_graph().overflow_data.deal_with_overflow();
on_overflow(self)
} else {
body(self)
};

self.search_graph().overflow_data.additional_depth -= 1;
result
}
}

impl<'tcx> OverflowHandler<'tcx> for EvalCtxt<'_, 'tcx> {
fn search_graph(&mut self) -> &mut SearchGraph<'tcx> {
&mut self.search_graph
}
}

impl<'tcx> OverflowHandler<'tcx> for SearchGraph<'tcx> {
fn search_graph(&mut self) -> &mut SearchGraph<'tcx> {
self
}
}

impl<'tcx> SearchGraph<'tcx> {
pub fn deal_with_overflow(
&mut self,
tcx: TyCtxt<'tcx>,
goal: Canonical<'tcx, impl Sized>,
) -> QueryResult<'tcx> {
self.overflow_data.deal_with_overflow();
response_no_constraints(tcx, goal, Certainty::OVERFLOW)
}
}
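For reference, the behaviour of the deleted `OverflowData` can be condensed into a small standalone sketch. This is illustrative only: the struct and method names mirror the removed file, but the limit arithmetic is simplified and none of this is compiler API.

struct Overflow {
    default_limit: usize,
    current_limit: usize,
    additional_depth: usize,
}

impl Overflow {
    fn new(default_limit: usize) -> Self {
        Overflow { default_limit, current_limit: default_limit, additional_depth: 0 }
    }

    // Depth plus the extra per-iteration depth is checked against the current limit.
    fn has_overflow(&self, depth: usize) -> bool {
        depth + self.additional_depth > self.current_limit
    }

    // After the first overflow, later goals get a much smaller budget.
    fn deal_with_overflow(&mut self) {
        self.current_limit = self.default_limit / 8;
    }
}

fn main() {
    let mut o = Overflow::new(128);
    assert!(!o.has_overflow(100));
    o.deal_with_overflow();
    // With the reduced limit of 16, the same depth now overflows.
    assert!(o.has_overflow(100));
}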
@ -1,7 +1,6 @@
//! Dealing with trait goals, i.e. `T: Trait<'a, U>`.

use super::assembly::{self, structural_traits};
use super::search_graph::OverflowHandler;
use super::{EvalCtxt, SolverMode};
use rustc_hir::def_id::DefId;
use rustc_hir::{LangItem, Movability};
@ -913,12 +912,7 @@ fn probe_and_evaluate_goal_for_constituent_tys(
ecx.add_goals(
constituent_tys(ecx, goal.predicate.self_ty())?
.into_iter()
.map(|ty| {
goal.with(
ecx.tcx(),
ty::Binder::dummy(goal.predicate.with_self_ty(ecx.tcx(), ty)),
)
})
.map(|ty| goal.with(ecx.tcx(), goal.predicate.with_self_ty(ecx.tcx(), ty)))
.collect::<Vec<_>>(),
);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
@ -935,7 +929,9 @@ pub(super) fn compute_trait_goal(
}

/// Normalize a non-self type when it is structually matched on when solving
/// a built-in goal. This is handled already through `assemble_candidates_after_normalizing_self_ty`
/// a built-in goal.
///
/// This is handled already through `assemble_candidates_after_normalizing_self_ty`
/// for the self type, but for other goals, additional normalization of other
/// arguments may be needed to completely implement the semantics of the trait.
///
@ -950,30 +946,22 @@ fn normalize_non_self_ty(
return Ok(Some(ty));
}

self.repeat_while_none(
|_| Ok(None),
|ecx| {
let ty::Alias(_, projection_ty) = *ty.kind() else {
return Some(Ok(Some(ty)));
};

let normalized_ty = ecx.next_ty_infer();
let normalizes_to_goal = Goal::new(
ecx.tcx(),
param_env,
ty::Binder::dummy(ty::ProjectionPredicate {
projection_ty,
term: normalized_ty.into(),
}),
);
ecx.add_goal(normalizes_to_goal);
if let Err(err) = ecx.try_evaluate_added_goals() {
return Some(Err(err));
}

ty = ecx.resolve_vars_if_possible(normalized_ty);
None
},
)

for _ in 0..self.local_overflow_limit() {
let ty::Alias(_, projection_ty) = *ty.kind() else {
return Ok(Some(ty));
};

let normalized_ty = self.next_ty_infer();
let normalizes_to_goal = Goal::new(
self.tcx(),
param_env,
ty::ProjectionPredicate { projection_ty, term: normalized_ty.into() },
);
self.add_goal(normalizes_to_goal);
self.try_evaluate_added_goals()?;
ty = self.resolve_vars_if_possible(normalized_ty);
}

Ok(None)
}
}
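A standalone sketch of the bounded normalization loop introduced above, using a made-up `Ty` enum and a plain usize budget in place of rustc's types and `local_overflow_limit`; only the loop shape (peel aliases until a non-alias is reached, otherwise give up) reflects the diff.

#[derive(Debug, PartialEq)]
enum Ty {
    Int,
    Alias(Box<Ty>),
}

fn normalize(mut ty: Ty, local_overflow_limit: usize) -> Option<Ty> {
    for _ in 0..local_overflow_limit {
        match ty {
            // Still an alias: resolve one layer and keep going.
            Ty::Alias(inner) => ty = *inner,
            // Not an alias: nothing left to normalize.
            other => return Some(other),
        }
    }
    // Ran out of budget, mirroring the `Ok(None)` fallback above.
    None
}

fn main() {
    let nested = Ty::Alias(Box::new(Ty::Alias(Box::new(Ty::Int))));
    assert_eq!(normalize(nested, 4), Some(Ty::Int));
    let deep = Ty::Alias(Box::new(Ty::Alias(Box::new(Ty::Alias(Box::new(Ty::Int))))));
    assert_eq!(normalize(deep, 2), None);
}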
@ -2789,7 +2789,7 @@ fn note_obligation_cause_code<T>(
// implement this trait and list them.
err.note(format!(
"`{short_item_name}` is a \"sealed trait\", because to implement \
it you also need to implelement `{}`, which is not accessible; \
it you also need to implement `{}`, which is not accessible; \
this is usually done to force you to use one of the provided \
types that already implement it",
with_no_trimmed_paths!(tcx.def_path_str(def_id)),
@ -68,7 +68,7 @@
--test-arrow-color: #dedede;
--test-arrow-background-color: rgba(78, 139, 202, 0.2);
--test-arrow-hover-color: #dedede;
--test-arrow-hover-background-color: #4e8bca;
--test-arrow-hover-background-color: rgb(78, 139, 202);
--target-background-color: #494a3d;
--target-border-color: #bb7410;
--kbd-color: #000;
@ -68,7 +68,7 @@
--test-arrow-color: #f5f5f5;
--test-arrow-background-color: rgba(78, 139, 202, 0.2);
--test-arrow-hover-color: #f5f5f5;
--test-arrow-hover-background-color: #4e8bca;
--test-arrow-hover-background-color: rgb(78, 139, 202);
--target-background-color: #fdffd3;
--target-border-color: #ad7c37;
--kbd-color: #000;
@ -2466,8 +2466,13 @@ fn make_compile_args(
rustc.args(&["-A", "unused"]);
}

// Allow tests to use internal features.
rustc.args(&["-A", "internal_features"]);

// #[cfg(not(bootstrap)] unconditionally pass flag after beta bump
// since `ui-fulldeps --stage=1` builds using the stage 0 compiler,
// which doesn't have this lint.
if !(self.config.stage_id.starts_with("stage1-") && self.config.suite == "ui-fulldeps") {
// Allow tests to use internal features.
rustc.args(&["-A", "internal_features"]);
}

if self.props.force_host {
self.maybe_add_external_args(&mut rustc, &self.config.host_rustcflags);
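The new guard can be factored as a tiny predicate. The helper below is hypothetical and simply restates the condition shown above: `-A internal_features` is only passed when the compiler running the test actually knows that lint, i.e. not for `ui-fulldeps` at stage 1, which is built by the stage 0 compiler.

fn allow_internal_features(stage_id: &str, suite: &str) -> bool {
    // Skip only the combination built by the stage 0 (beta) compiler.
    !(stage_id.starts_with("stage1-") && suite == "ui-fulldeps")
}

fn main() {
    assert!(allow_internal_features("stage2-x86_64-unknown-linux-gnu", "ui-fulldeps"));
    assert!(!allow_internal_features("stage1-x86_64-unknown-linux-gnu", "ui-fulldeps"));
    assert!(allow_internal_features("stage1-x86_64-unknown-linux-gnu", "ui"));
}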
@ -33,22 +33,22 @@ define-function: (
call-function: ("check-run-button", {
"theme": "ayu",
"color": "rgb(120, 135, 151)",
"color": "#788797",
"background": "rgba(57, 175, 215, 0.09)",
"hover_color": "rgb(197, 197, 197)",
"hover_color": "#c5c5c5",
"hover_background": "rgba(57, 175, 215, 0.37)",
})
call-function: ("check-run-button", {
"theme": "dark",
"color": "rgb(222, 222, 222)",
"color": "#dedede",
"background": "rgba(78, 139, 202, 0.2)",
"hover_color": "rgb(222, 222, 222)",
"hover_color": "#dedede",
"hover_background": "rgb(78, 139, 202)",
})
call-function: ("check-run-button", {
"theme": "light",
"color": "rgb(245, 245, 245)",
"color": "#f5f5f5",
"background": "rgba(78, 139, 202, 0.2)",
"hover_color": "rgb(245, 245, 245)",
"hover_color": "#f5f5f5",
"hover_background": "rgb(78, 139, 202)",
})
@ -1,5 +1,5 @@
error[E0282]: type annotations needed
--> $DIR/param-env-region-infer.rs:18:10
--> $DIR/param-env-region-infer.rs:19:10
|
LL | t as _
| ^ cannot infer type
@ -1,30 +0,0 @@
error[E0391]: cycle detected when computing type of `make_dyn_star::{opaque#0}`
--> $DIR/param-env-region-infer.rs:16:60
|
LL | fn make_dyn_star<'a, T: PointerLike + Debug + 'a>(t: T) -> impl PointerLike + Debug + 'a {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
note: ...which requires type-checking `make_dyn_star`...
--> $DIR/param-env-region-infer.rs:16:1
|
LL | fn make_dyn_star<'a, T: PointerLike + Debug + 'a>(t: T) -> impl PointerLike + Debug + 'a {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which requires computing layout of `make_dyn_star::{opaque#0}`...
= note: ...which requires normalizing `make_dyn_star::{opaque#0}`...
= note: ...which again requires computing type of `make_dyn_star::{opaque#0}`, completing the cycle
note: cycle used when checking item types in top-level module
--> $DIR/param-env-region-infer.rs:10:1
|
LL | / #![feature(dyn_star, pointer_like_trait)]
LL | | #![allow(incomplete_features)]
LL | |
LL | | use std::fmt::Debug;
... |
LL | |
LL | | fn main() {}
| |____________^
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information

error: aborting due to previous error

For more information about this error, try `rustc --explain E0391`.
@ -1,9 +1,10 @@
// revisions: current next
// Need `-Zdeduplicate-diagnostics=yes` because the number of cycle errors
// emitted is for some horrible reason platform-specific.
//[next] compile-flags: -Ztrait-solver=next -Zdeduplicate-diagnostics=yes
// revisions: current
// incremental

// FIXME(-Ztrait-solver=next): THis currently results in unstable query results:
// `normalizes-to(opaque, opaque)` changes from `Maybe(Ambiguous)` to `Maybe(Overflow)`
// once the hidden type of the opaque is already defined to be itself.

// checks that we don't ICE if there are region inference variables in the environment
// when computing `PointerLike` builtin candidates.
@ -9,7 +9,7 @@ note: required by a bound in `Sealed`
|
LL | pub trait Sealed: self::b::Hidden {
| ^^^^^^^^^^^^^^^ required by this bound in `Sealed`
= note: `Sealed` is a "sealed trait", because to implement it you also need to implelement `a::b::Hidden`, which is not accessible; this is usually done to force you to use one of the provided types that already implement it
= note: `Sealed` is a "sealed trait", because to implement it you also need to implement `a::b::Hidden`, which is not accessible; this is usually done to force you to use one of the provided types that already implement it

error: aborting due to previous error
@ -0,0 +1,32 @@
// compile-flags: -Ztrait-solver=next

// Proving `W<?0>: Trait` instantiates `?0` with `(W<?1>, W<?2>)` and then
// proves `W<?1>: Trait` and `W<?2>: Trait`, resulting in a coinductive cycle.
//
// Proving coinductive cycles runs until we reach a fixpoint. This fixpoint is
// never reached here and each step doubles the amount of nested obligations.
//
// This previously caused a hang in the trait solver, see
// https://github.com/rust-lang/trait-system-refactor-initiative/issues/13.

#![feature(rustc_attrs)]

#[rustc_coinductive]
trait Trait {}

struct W<T>(T);

impl<T, U> Trait for W<(W<T>, W<U>)>
where
W<T>: Trait,
W<U>: Trait,
{
}

fn impls<T: Trait>() {}

fn main() {
impls::<W<_>>();
//~^ ERROR type annotations needed
//~| ERROR overflow evaluating the requirement
}
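A rough sketch of the growth described in the test's comment: each fixpoint step doubles the number of nested obligations, so the obligation count after n steps is 2^n. The function below is illustrative arithmetic, not solver code.

fn obligations_after(steps: u32) -> u64 {
    // One goal `W<?0>: Trait` splits into two goals per fixpoint step.
    2u64.pow(steps)
}

fn main() {
    assert_eq!(obligations_after(0), 1);
    assert_eq!(obligations_after(10), 1024);
    // Without a fixpoint being reached or an overflow limit this keeps growing,
    // which is why the solver previously hung on this test.
    assert_eq!(obligations_after(20), 1_048_576);
}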
@ -0,0 +1,23 @@
error[E0282]: type annotations needed
--> $DIR/fixpoint-exponential-growth.rs:29:5
|
LL | impls::<W<_>>();
| ^^^^^^^^^^^^^ cannot infer type of the type parameter `T` declared on the function `impls`

error[E0275]: overflow evaluating the requirement `W<_>: Trait`
--> $DIR/fixpoint-exponential-growth.rs:29:5
|
LL | impls::<W<_>>();
| ^^^^^^^^^^^^^
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`fixpoint_exponential_growth`)
note: required by a bound in `impls`
--> $DIR/fixpoint-exponential-growth.rs:26:13
|
LL | fn impls<T: Trait>() {}
| ^^^^^ required by this bound in `impls`

error: aborting due to 2 previous errors

Some errors have detailed explanations: E0275, E0282.
For more information about an error, try `rustc --explain E0275`.
@ -0,0 +1,69 @@
// compile-flags: -Ztrait-solver=next
#![feature(rustc_attrs)]

// This test is incredibly subtle. At its core the goal is to get a coinductive cycle,
// which, depending on its root goal, either holds or errors. We achieve this by getting
// incomplete inference via a `ParamEnv` candidate in the `A<T>` impl and required
// inference from an `Impl` candidate in the `B<T>` impl.
//
// To make global cache accesses stronger than the guidance from the where-bounds, we add
// another coinductive cycle from `A<T>: Trait<U, V, D>` to `A<T>: Trait<U, D, V>` and only
// constrain `D` directly. This means that any candidates which rely on `V` only make
// progress in the second iteration, allowing a cache access in the first iteration to take
// precedence.
//
// tl;dr: our caching of coinductive cycles was broken and this is a regression
// test for that.

#[rustc_coinductive]
trait Trait<T: ?Sized, V: ?Sized, D: ?Sized> {}
struct A<T: ?Sized>(*const T);
struct B<T: ?Sized>(*const T);

trait IncompleteGuidance<T: ?Sized, V: ?Sized> {}
impl<T: ?Sized, U: ?Sized + 'static> IncompleteGuidance<U, u8> for T {}
impl<T: ?Sized, U: ?Sized + 'static> IncompleteGuidance<U, i8> for T {}
impl<T: ?Sized, U: ?Sized + 'static> IncompleteGuidance<U, i16> for T {}

trait ImplGuidance<T: ?Sized, V: ?Sized> {}
impl<T: ?Sized> ImplGuidance<u32, u8> for T {}
impl<T: ?Sized> ImplGuidance<i32, i8> for T {}

impl<T: ?Sized, U: ?Sized, V: ?Sized, D: ?Sized> Trait<U, V, D> for A<T>
where
T: IncompleteGuidance<U, V>,
A<T>: Trait<U, D, V>,
B<T>: Trait<U, V, D>,
(): ToU8<D>,
{
}

trait ToU8<T: ?Sized> {}
impl ToU8<u8> for () {}

impl<T: ?Sized, U: ?Sized, V: ?Sized, D: ?Sized> Trait<U, V, D> for B<T>
where
T: ImplGuidance<U, V>,
A<T>: Trait<U, V, D>,
{
}

fn impls_trait<T: ?Sized + Trait<U, V, D>, U: ?Sized, V: ?Sized, D: ?Sized>() {}

fn with_bound<X>()
where
X: IncompleteGuidance<i32, u8>,
X: IncompleteGuidance<u32, i8>,
X: IncompleteGuidance<u32, i16>,
{
impls_trait::<B<X>, _, _, _>(); // entering the cycle from `B` works

// entering the cycle from `A` fails, but would work if we were to use the cache
// result of `B<X>`.
impls_trait::<A<X>, _, _, _>();
//~^ ERROR the trait bound `A<X>: Trait<_, _, _>` is not satisfied
}

fn main() {
with_bound::<u32>();
}
@ -0,0 +1,16 @@
error[E0277]: the trait bound `A<X>: Trait<_, _, _>` is not satisfied
--> $DIR/incompleteness-unstable-result.rs:63:19
|
LL | impls_trait::<A<X>, _, _, _>();
| ^^^^ the trait `Trait<_, _, _>` is not implemented for `A<X>`
|
= help: the trait `Trait<U, V, D>` is implemented for `A<T>`
note: required by a bound in `impls_trait`
--> $DIR/incompleteness-unstable-result.rs:51:28
|
LL | fn impls_trait<T: ?Sized + Trait<U, V, D>, U: ?Sized, V: ?Sized, D: ?Sized>() {}
| ^^^^^^^^^^^^^^ required by this bound in `impls_trait`

error: aborting due to previous error

For more information about this error, try `rustc --explain E0277`.
23
tests/ui/traits/new-solver/overflow/global-cache.rs
Normal file
@ -0,0 +1,23 @@
// compile-flags: -Ztrait-solver=next

// Check that we consider the reached depth of global cache
// entries when detecting overflow. We would otherwise be unstable
// wrt to incremental compilation.
#![recursion_limit = "9"]

trait Trait {}

struct Inc<T>(T);

impl<T: Trait> Trait for Inc<T> {}
impl Trait for () {}

fn impls_trait<T: Trait>() {}

type Four<T> = Inc<Inc<Inc<Inc<T>>>>;

fn main() {
impls_trait::<Four<Four<()>>>();
impls_trait::<Four<Four<Four<Four<()>>>>>();
//~^ ERROR overflow evaluating the requirement
}
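As a back-of-the-envelope check of the depths involved in this test, assuming (for illustration only) that each `Inc` layer adds one step to the `Trait` proof: `Four<T>` wraps `T` in four `Inc`s, so the first call nests 8 layers, within `#![recursion_limit = "9"]`, while the second nests 16 and overflows, which is why the reached depth stored in the global cache matters.

fn inc_layers(times_four_applied: u32) -> u32 {
    // `Four<T>` adds four `Inc` layers per application.
    4 * times_four_applied
}

fn main() {
    // `Four<Four<()>>`: 8 nested `Inc`s, within the recursion limit of 9.
    assert_eq!(inc_layers(2), 8);
    // `Four<Four<Four<Four<()>>>>`: 16 nested `Inc`s, which overflows.
    assert_eq!(inc_layers(4), 16);
}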
16
tests/ui/traits/new-solver/overflow/global-cache.stderr
Normal file
@ -0,0 +1,16 @@
error[E0275]: overflow evaluating the requirement `Inc<Inc<Inc<Inc<Inc<Inc<Inc<...>>>>>>>: Trait`
--> $DIR/global-cache.rs:21:5
|
LL | impls_trait::<Four<Four<Four<Four<()>>>>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "18"]` attribute to your crate (`global_cache`)
note: required by a bound in `impls_trait`
--> $DIR/global-cache.rs:15:19
|
LL | fn impls_trait<T: Trait>() {}
| ^^^^^ required by this bound in `impls_trait`

error: aborting due to previous error

For more information about this error, try `rustc --explain E0275`.
@ -1,23 +1,23 @@
error[E0425]: cannot find value `e` in this scope
--> $DIR/issue-114423.rs:7:51
--> $DIR/issue-114423-ice-regression-in-suggestion.rs:7:51
|
LL | let (r, alone_in_path, b): (f32, f32, f32) = (e.clone(), e.clone());
| ^ not found in this scope

error[E0425]: cannot find value `e` in this scope
--> $DIR/issue-114423.rs:7:62
--> $DIR/issue-114423-ice-regression-in-suggestion.rs:7:62
|
LL | let (r, alone_in_path, b): (f32, f32, f32) = (e.clone(), e.clone());
| ^ not found in this scope

error[E0425]: cannot find value `g` in this scope
--> $DIR/issue-114423.rs:11:22
--> $DIR/issue-114423-ice-regression-in-suggestion.rs:11:22
|
LL | let _ = RGB { r, g, b };
| ^ help: a local variable with a similar name exists: `b`

error[E0308]: mismatched types
--> $DIR/issue-114423.rs:7:50
--> $DIR/issue-114423-ice-regression-in-suggestion.rs:7:50
|
LL | let (r, alone_in_path, b): (f32, f32, f32) = (e.clone(), e.clone());
| --------------- ^^^^^^^^^^^^^^^^^^^^^^ expected a tuple with 3 elements, found one with 2 elements
@ -28,7 +28,7 @@ LL | let (r, alone_in_path, b): (f32, f32, f32) = (e.clone(), e.clone());
found tuple `(f32, f32)`

error[E0560]: struct `RGB` has no field named `r`
--> $DIR/issue-114423.rs:11:19
--> $DIR/issue-114423-ice-regression-in-suggestion.rs:11:19
|
LL | let _ = RGB { r, g, b };
| ^ `RGB` does not have this field
@ -36,7 +36,7 @@ LL | let _ = RGB { r, g, b };
= note: all struct fields are already assigned

error[E0308]: mismatched types
--> $DIR/issue-114423.rs:11:25
--> $DIR/issue-114423-ice-regression-in-suggestion.rs:11:25
|
LL | let _ = RGB { r, g, b };
| ^ expected `f64`, found `f32`