diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index d7708a3bc3f..035bfe978f2 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -31,6 +31,7 @@
 #[macro_use]
 mod plumbing;
 pub use plumbing::QueryCtxt;
+use rustc_query_system::dep_graph::SerializedDepNodeIndex;
 use rustc_query_system::query::*;
 #[cfg(parallel_compiler)]
 pub use rustc_query_system::query::{deadlock, QueryContext};
diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs
index 6522b1406be..35b7e5919e4 100644
--- a/compiler/rustc_query_impl/src/on_disk_cache.rs
+++ b/compiler/rustc_query_impl/src/on_disk_cache.rs
@@ -388,6 +388,12 @@ pub fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: Que
         debug_assert!(prev.is_none());
     }
 
+    /// Return whether the cached query result can be decoded.
+    pub fn loadable_from_disk(&self, dep_node_index: SerializedDepNodeIndex) -> bool {
+        self.query_result_index.contains_key(&dep_node_index)
+        // with_decoder is infallible, so we can stop here
+    }
+
     /// Returns the cached query result if there is something in the cache for
     /// the given `SerializedDepNodeIndex`; otherwise returns `None`.
     pub fn try_load_query_result<'tcx, T>(
@@ -398,7 +404,9 @@ pub fn try_load_query_result<'tcx, T>(
     where
         T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
     {
-        self.load_indexed(tcx, dep_node_index, &self.query_result_index)
+        let opt_value = self.load_indexed(tcx, dep_node_index, &self.query_result_index);
+        debug_assert_eq!(opt_value.is_some(), self.loadable_from_disk(dep_node_index));
+        opt_value
     }
 
     /// Stores side effect emitted during computation of an anonymous query.
@@ -428,8 +436,8 @@ fn load_indexed<'tcx, T>(
         T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
     {
         let pos = index.get(&dep_node_index).cloned()?;
-
-        self.with_decoder(tcx, pos, |decoder| Some(decode_tagged(decoder, dep_node_index)))
+        let value = self.with_decoder(tcx, pos, |decoder| decode_tagged(decoder, dep_node_index));
+        Some(value)
     }
 
     fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>(
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index 005ce16dbb9..ca3c3997df0 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -364,6 +364,14 @@ fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: D
     }
 }
 
+pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool {
+    if let Some(cache) = tcx.on_disk_cache().as_ref() {
+        cache.loadable_from_disk(id)
+    } else {
+        false
+    }
+}
+
 pub(crate) fn try_load_from_disk<'tcx, V>(
     tcx: QueryCtxt<'tcx>,
     id: SerializedDepNodeIndex,
@@ -535,6 +543,21 @@ fn try_load_from_disk(
                 })
             }
 
+            #[inline]
+            fn loadable_from_disk(
+                self,
+                _qcx: QueryCtxt<'tcx>,
+                _key: &Self::Key,
+                _index: SerializedDepNodeIndex,
+            ) -> bool {
+                should_ever_cache_on_disk!([$($modifiers)*] {
+                    self.cache_on_disk(_qcx.tcx, _key) &&
+                        $crate::plumbing::loadable_from_disk(_qcx, _index)
+                } {
+                    false
+                })
+            }
+
             #[inline(always)]
             fn anon(self) -> bool {
                 is_anon!([$($modifiers)*])
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index e44a00ca6cb..a0aeb812af9 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -43,6 +43,8 @@ fn query_cache<'a>(self, tcx: Qcx) -> &'a Self::Cache
 
     fn try_load_from_disk(self, qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self::Value>;
 
+    fn loadable_from_disk(self, qcx: Qcx, key: &Self::Key, idx: SerializedDepNodeIndex) -> bool;
+
     fn anon(self) -> bool;
     fn eval_always(self) -> bool;
     fn depth_limit(self) -> bool;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 005fcd8c4cc..1229a5fe5e8 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -557,10 +557,17 @@ fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
         // can be forced from `DepNode`.
         debug_assert!(
             !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
-            "missing on-disk cache entry for {dep_node:?}"
+            "missing on-disk cache entry for reconstructible {dep_node:?}"
         );
     }
 
+    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
+    // we should actually be able to load it.
+    debug_assert!(
+        !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
+        "missing on-disk cache entry for loadable {dep_node:?}"
+    );
+
     // We could not load a result from the on-disk cache, so
     // recompute.
     let prof_timer = qcx.dep_context().profiler().query_provider();
@@ -719,7 +726,7 @@ fn ensure_must_run<Q, Qcx>(
     let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
 
     let dep_graph = qcx.dep_context().dep_graph();
-    match dep_graph.try_mark_green(qcx, &dep_node) {
+    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
         None => {
             // A None return from `try_mark_green` means that this is either
             // a new dep node or that the dep node has already been marked red.
@@ -727,14 +734,17 @@ fn ensure_must_run<Q, Qcx>(
             // DepNodeIndex. We must invoke the query itself. The performance cost
             // this introduces should be negligible as we'll immediately hit the
             // in-memory cache, or another query down the line will.
-            (true, Some(dep_node))
+            return (true, Some(dep_node));
         }
-        Some((_, dep_node_index)) => {
+        Some((serialized_dep_node_index, dep_node_index)) => {
             dep_graph.read_index(dep_node_index);
             qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
-            (false, None)
+            serialized_dep_node_index
         }
-    }
+    };
+
+    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
+    (!loadable, Some(dep_node))
 }
 
 #[derive(Debug)]