Remove QueryEngine trait

John Kåre Alsaker 2023-03-25 09:46:19 +01:00
parent 897a146006
commit 66d85438ca
20 changed files with 272 additions and 327 deletions

View File

@ -3641,6 +3641,7 @@ dependencies = [
"rustc_plugin_impl",
"rustc_privacy",
"rustc_query_impl",
"rustc_query_system",
"rustc_resolve",
"rustc_session",
"rustc_span",
@ -3770,6 +3771,7 @@ dependencies = [
"derive_more",
"either",
"gsgdt",
"measureme",
"polonius-engine",
"rustc-rayon",
"rustc-rayon-core",

View File

@ -4,7 +4,7 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
use rustc_middle::ty::OnDiskCache;
use rustc_middle::query::on_disk_cache::OnDiskCache;
use rustc_serialize::opaque::MemDecoder;
use rustc_serialize::Decodable;
use rustc_session::config::IncrementalStateAssertion;
@ -211,7 +211,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
/// If we are not in incremental compilation mode, returns `None`.
/// Otherwise, tries to load the query result cache from disk,
/// creating an empty cache if it could not be loaded.
pub fn load_query_result_cache<'a, C: OnDiskCache<'a>>(sess: &'a Session) -> Option<C> {
pub fn load_query_result_cache(sess: &Session) -> Option<OnDiskCache<'_>> {
if sess.opts.incremental.is_none() {
return None;
}
@ -223,7 +223,9 @@ pub fn load_query_result_cache<'a, C: OnDiskCache<'a>>(sess: &'a Session) -> Opt
&query_cache_path(sess),
sess.is_nightly_build(),
) {
LoadResult::Ok { data: (bytes, start_pos) } => Some(C::new(sess, bytes, start_pos)),
_ => Some(C::new_empty(sess.source_map())),
LoadResult::Ok { data: (bytes, start_pos) } => {
Some(OnDiskCache::new(sess, bytes, start_pos))
}
_ => Some(OnDiskCache::new_empty(sess.source_map())),
}
}

View File

@ -48,7 +48,7 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
move || {
sess.time("incr_comp_persist_result_cache", || {
// Drop the memory map so that we can remove the file and write to it.
if let Some(odc) = &tcx.on_disk_cache {
if let Some(odc) = &tcx.query_system.on_disk_cache {
odc.drop_serialized_data(tcx);
}

View File

@ -44,6 +44,7 @@ rustc_lint = { path = "../rustc_lint" }
rustc_errors = { path = "../rustc_errors" }
rustc_plugin_impl = { path = "../rustc_plugin_impl" }
rustc_privacy = { path = "../rustc_privacy" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_query_impl = { path = "../rustc_query_impl" }
rustc_resolve = { path = "../rustc_resolve" }
rustc_target = { path = "../rustc_target" }

View File

@ -12,6 +12,7 @@
use rustc_middle::ty;
use rustc_parse::maybe_new_parser_from_source_str;
use rustc_query_impl::QueryCtxt;
use rustc_query_system::query::print_query_stack;
use rustc_session::config::{self, CheckCfg, ErrorOutputType, Input, OutputFilenames};
use rustc_session::lint;
use rustc_session::parse::{CrateConfig, ParseSess};
@ -317,7 +318,7 @@ pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
// state if it was responsible for triggering the panic.
let i = ty::tls::with_context_opt(|icx| {
if let Some(icx) = icx {
QueryCtxt::from_tcx(icx.tcx).try_print_query_stack(icx.query, handler, num_frames)
print_query_stack(QueryCtxt { tcx: icx.tcx }, icx.query, handler, num_frames)
} else {
0
}

View File

@ -23,7 +23,6 @@
use rustc_parse::{parse_crate_from_file, parse_crate_from_source_str, validate_attr};
use rustc_passes::{self, hir_stats, layout_test};
use rustc_plugin_impl as plugin;
use rustc_query_impl::{OnDiskCache, Queries as TcxQueries};
use rustc_resolve::Resolver;
use rustc_session::config::{CrateType, Input, OutputFilenames, OutputType};
use rustc_session::cstore::{MetadataLoader, Untracked};
@ -669,7 +668,6 @@ pub fn create_global_ctxt<'tcx>(
lint_store: Lrc<LintStore>,
dep_graph: DepGraph,
untracked: Untracked,
queries: &'tcx OnceCell<TcxQueries<'tcx>>,
gcx_cell: &'tcx OnceCell<GlobalCtxt<'tcx>>,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
hir_arena: &'tcx WorkerLocal<rustc_hir::Arena<'tcx>>,
@ -693,10 +691,6 @@ pub fn create_global_ctxt<'tcx>(
callback(sess, &mut local_providers, &mut extern_providers);
}
let queries = queries.get_or_init(|| {
TcxQueries::new(local_providers, extern_providers, query_result_on_disk_cache)
});
sess.time("setup_global_ctxt", || {
gcx_cell.get_or_init(move || {
TyCtxt::create_global_ctxt(
@ -706,9 +700,9 @@ pub fn create_global_ctxt<'tcx>(
hir_arena,
untracked,
dep_graph,
queries.on_disk_cache.as_ref().map(OnDiskCache::as_dyn),
queries.as_dyn(),
query_result_on_disk_cache,
rustc_query_impl::query_callbacks(arena),
rustc_query_impl::query_system_fns(local_providers, extern_providers),
)
})
})

View File

@ -16,7 +16,6 @@
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
use rustc_middle::ty::{GlobalCtxt, TyCtxt};
use rustc_query_impl::Queries as TcxQueries;
use rustc_session::config::{self, OutputFilenames, OutputType};
use rustc_session::cstore::Untracked;
use rustc_session::{output::find_crate_name, Session};
@ -81,7 +80,6 @@ fn default() -> Self {
pub struct Queries<'tcx> {
compiler: &'tcx Compiler,
gcx_cell: OnceCell<GlobalCtxt<'tcx>>,
queries: OnceCell<TcxQueries<'tcx>>,
arena: WorkerLocal<Arena<'tcx>>,
hir_arena: WorkerLocal<rustc_hir::Arena<'tcx>>,
@ -102,7 +100,6 @@ pub fn new(compiler: &'tcx Compiler) -> Queries<'tcx> {
Queries {
compiler,
gcx_cell: OnceCell::new(),
queries: OnceCell::new(),
arena: WorkerLocal::new(|_| Arena::default()),
hir_arena: WorkerLocal::new(|_| rustc_hir::Arena::default()),
dep_graph_future: Default::default(),
@ -225,7 +222,6 @@ pub fn global_ctxt(&'tcx self) -> Result<QueryResult<'_, &'tcx GlobalCtxt<'tcx>>
lint_store,
self.dep_graph()?.steal(),
untracked,
&self.queries,
&self.gcx_cell,
&self.arena,
&self.hir_arena,

View File

@ -168,7 +168,8 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
) -> R {
use rustc_data_structures::jobserver;
use rustc_middle::ty::tls;
use rustc_query_impl::{deadlock, QueryContext, QueryCtxt};
use rustc_query_impl::QueryCtxt;
use rustc_query_system::query::{deadlock, QueryContext};
let mut builder = rayon::ThreadPoolBuilder::new()
.thread_name(|_| "rustc".to_string())
@ -179,7 +180,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
// On deadlock, creates a new thread and forwards information in thread
// locals to it. The new thread runs the deadlock handler.
let query_map = tls::with(|tcx| {
QueryCtxt::from_tcx(tcx)
QueryCtxt::new(tcx)
.try_collect_active_jobs()
.expect("active jobs shouldn't be locked in deadlock handler")
});

View File

@ -11,6 +11,7 @@ chalk-ir = "0.87.0"
derive_more = "0.99.17"
either = "1.5.0"
gsgdt = "0.1.2"
measureme = "10.0.0"
polonius-engine = "0.13.0"
rustc_apfloat = { path = "../rustc_apfloat" }
rustc_arena = { path = "../rustc_arena" }

View File

@ -227,7 +227,9 @@ pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>>(
// References to statics don't need to know about their allocations,
// just about their `DefId`.
AllocDiscriminant::Static.encode(encoder);
did.encode(encoder);
// Cannot use `did.encode(encoder)` because of a bug around
// specializations and method calls.
Encodable::<E>::encode(&did, encoder);
}
}
}
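
The hunk above swaps a plain method call for a fully-qualified trait call to dodge the specialization bug its comment mentions. As a rough sketch of the two call forms, with a hypothetical Encode trait and ByteSink encoder standing in for rustc's Encodable/TyEncoder (editor's illustration, not part of the commit):

trait Encode<E> {
    fn encode(&self, enc: &mut E);
}

struct ByteSink(Vec<u8>);

impl Encode<ByteSink> for u32 {
    fn encode(&self, enc: &mut ByteSink) {
        enc.0.extend_from_slice(&self.to_le_bytes());
    }
}

fn write<E>(value: &u32, enc: &mut E)
where
    u32: Encode<E>,
{
    // Method-call form: resolution goes through auto-ref; the comment above
    // attributes rustc's problem to the interaction of specializations and
    // exactly this kind of method call.
    value.encode(enc);
    // Fully-qualified form, naming the trait and encoder type explicitly,
    // mirroring `Encodable::<E>::encode(&did, encoder)` in the diff.
    Encode::<E>::encode(value, enc);
}

fn main() {
    let mut sink = ByteSink(Vec::new());
    write(&42u32, &mut sink);
    assert_eq!(sink.0.len(), 8); // both calls wrote 4 bytes each
}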

View File

@ -9,6 +9,7 @@
pub mod erase;
mod keys;
pub mod on_disk_cache;
pub use keys::{AsLocalKey, Key, LocalCrate};
// Each of these queries corresponds to a function pointer field in the

View File

@ -1,4 +1,3 @@
use crate::QueryCtxt;
use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::stable_hasher::Hash64;
@ -13,8 +12,7 @@
use rustc_middle::mir::{self, interpret};
use rustc_middle::ty::codec::{RefDecodable, TyDecoder, TyEncoder};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_query_system::dep_graph::DepContext;
use rustc_query_system::query::{QueryCache, QuerySideEffects};
use rustc_query_system::query::QuerySideEffects;
use rustc_serialize::{
opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder},
Decodable, Decoder, Encodable, Encoder,
@ -123,7 +121,7 @@ struct Footer {
pub struct AbsoluteBytePos(u64);
impl AbsoluteBytePos {
fn new(pos: usize) -> AbsoluteBytePos {
pub fn new(pos: usize) -> AbsoluteBytePos {
AbsoluteBytePos(pos.try_into().expect("Incremental cache file size overflowed u64."))
}
@ -158,9 +156,9 @@ fn new(tcx: TyCtxt<'_>, file: &SourceFile) -> EncodedSourceFileId {
}
}
impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
impl<'sess> OnDiskCache<'sess> {
/// Creates a new `OnDiskCache` instance from the serialized data in `data`.
fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self {
pub fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self {
debug_assert!(sess.opts.incremental.is_some());
// Wrap in a scope so we can borrow `data`.
@ -193,7 +191,7 @@ fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self {
}
}
fn new_empty(source_map: &'sess SourceMap) -> Self {
pub fn new_empty(source_map: &'sess SourceMap) -> Self {
Self {
serialized_data: RwLock::new(None),
file_index_to_stable_id: Default::default(),
@ -215,7 +213,7 @@ fn new_empty(source_map: &'sess SourceMap) -> Self {
/// Cache promotions require invoking queries, which needs to read the serialized data.
/// In order to serialize the new on-disk cache, the former on-disk cache file needs to be
/// deleted, hence we won't be able to refer to its memmapped data.
fn drop_serialized_data(&self, tcx: TyCtxt<'_>) {
pub fn drop_serialized_data(&self, tcx: TyCtxt<'_>) {
// Load everything into memory so we can write it out to the on-disk
// cache. The vast majority of cacheable query results should already
// be in memory, so this should be a cheap operation.
@ -227,7 +225,7 @@ fn drop_serialized_data(&self, tcx: TyCtxt<'_>) {
*self.serialized_data.write() = None;
}
fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult {
pub fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult {
// Serializing the `DepGraph` should not modify it.
tcx.dep_graph.with_ignore(|| {
// Allocate `SourceFileIndex`es.
@ -269,7 +267,7 @@ fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult {
tcx.sess.time("encode_query_results", || {
let enc = &mut encoder;
let qri = &mut query_result_index;
QueryCtxt::from_tcx(tcx).encode_query_results(enc, qri);
(tcx.query_system.fns.encode_query_results)(tcx, enc, qri);
});
// Encode side effects.
@ -358,12 +356,6 @@ fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult {
encoder.finish()
})
}
}
impl<'sess> OnDiskCache<'sess> {
pub fn as_dyn(&self) -> &dyn rustc_middle::ty::OnDiskCache<'sess> {
self as _
}
/// Loads a `QuerySideEffects` created during the previous compilation session.
pub fn load_side_effects(
@ -855,7 +847,7 @@ fn source_file_index(&mut self, source_file: Lrc<SourceFile>) -> SourceFileIndex
/// encode the specified tag, then the given value, then the number of
/// bytes taken up by tag and value. On decoding, we can then verify that
/// we get the expected tag and read the expected number of bytes.
fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) {
pub fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) {
let start_pos = self.position();
tag.encode(self);
@ -1032,33 +1024,3 @@ fn encode(&self, e: &mut CacheEncoder<'a, 'tcx>) {
self.encode(&mut e.encoder);
}
}
pub(crate) fn encode_query_results<'a, 'tcx, Q>(
query: Q,
qcx: QueryCtxt<'tcx>,
encoder: &mut CacheEncoder<'a, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) where
Q: super::QueryConfigRestored<'tcx>,
Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>,
{
let _timer = qcx
.tcx
.profiler()
.verbose_generic_activity_with_arg("encode_query_results_for", query.name());
assert!(query.query_state(qcx).all_inactive());
let cache = query.query_cache(qcx);
cache.iter(&mut |key, value, dep_node| {
if query.cache_on_disk(qcx.tcx, &key) {
let dep_node = SerializedDepNodeIndex::new(dep_node.index());
// Record position of the cache entry.
query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.encoder.position())));
// Encode the type check tables with the `SerializedDepNodeIndex`
// as tag.
encoder.encode_tagged(dep_node, &Q::restore(*value));
}
});
}
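
The `encode_tagged` helper made `pub` above implements the scheme its doc comment describes: the tag, then the value, then the number of bytes taken up by tag and value, so the decoder can verify both. A self-contained sketch of that layout over a plain byte buffer with fixed-size fields (editor's illustration with made-up helpers, not rustc's CacheEncoder/CacheDecoder):

fn encode_tagged(buf: &mut Vec<u8>, tag: u32, value: u64) {
    let start = buf.len();
    buf.extend_from_slice(&tag.to_le_bytes());    // tag
    buf.extend_from_slice(&value.to_le_bytes());  // value
    let len = (buf.len() - start) as u64;         // bytes used by tag + value
    buf.extend_from_slice(&len.to_le_bytes());
}

fn decode_tagged(buf: &[u8], pos: usize, expected_tag: u32) -> u64 {
    let tag = u32::from_le_bytes(buf[pos..pos + 4].try_into().unwrap());
    assert_eq!(tag, expected_tag, "unexpected tag in cache");
    let value = u64::from_le_bytes(buf[pos + 4..pos + 12].try_into().unwrap());
    let len = u64::from_le_bytes(buf[pos + 12..pos + 20].try_into().unwrap());
    assert_eq!(len, 12, "tag + value should take exactly 12 bytes");
    value
}

fn main() {
    let mut buf = Vec::new();
    encode_tagged(&mut buf, 7, 42);
    assert_eq!(decode_tagged(&buf, 0, 7), 42);
}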

View File

@ -500,7 +500,6 @@ fn decode(decoder: &mut D) -> &'tcx Self {
macro_rules! implement_ty_decoder {
($DecoderName:ident <$($typaram:tt),*>) => {
mod __ty_decoder_impl {
use std::borrow::Cow;
use rustc_serialize::Decoder;
use super::$DecoderName;

View File

@ -14,11 +14,14 @@
use crate::middle::stability;
use crate::mir::interpret::{self, Allocation, ConstAllocation};
use crate::mir::{Body, Local, Place, PlaceElem, ProjectionKind, Promoted};
use crate::query::on_disk_cache::OnDiskCache;
use crate::query::LocalCrate;
use crate::thir::Thir;
use crate::traits;
use crate::traits::solve;
use crate::traits::solve::{ExternalConstraints, ExternalConstraintsData};
use crate::ty::query::QuerySystem;
use crate::ty::query::QuerySystemFns;
use crate::ty::query::{self, TyCtxtAt};
use crate::ty::{
self, AdtDef, AdtDefData, AdtKind, Binder, Const, ConstData, FloatTy, FloatVar, FloatVid,
@ -31,7 +34,6 @@
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::intern::Interned;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@ -61,7 +63,6 @@
use rustc_session::Limit;
use rustc_session::Session;
use rustc_span::def_id::{DefPathHash, StableCrateId};
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::{FieldIdx, Layout, LayoutS, TargetDataLayout, VariantIdx};
@ -84,21 +85,6 @@
const TINY_CONST_EVAL_LIMIT: Limit = Limit(20);
pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
/// Creates a new `OnDiskCache` instance from the serialized data in `data`.
fn new(sess: &'tcx Session, data: Mmap, start_pos: usize) -> Self
where
Self: Sized;
fn new_empty(source_map: &'tcx SourceMap) -> Self
where
Self: Sized;
fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>);
fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: FileEncoder) -> FileEncodeResult;
}
#[allow(rustc::usage_of_ty_tykind)]
impl<'tcx> Interner for TyCtxt<'tcx> {
type AdtDef = ty::AdtDef<'tcx>;
@ -527,13 +513,6 @@ pub struct GlobalCtxt<'tcx> {
untracked: Untracked,
/// This provides access to the incremental compilation on-disk cache for query results.
/// Do not access this directly. It is only meant to be used by
/// `DepGraph::try_mark_green()` and the query infrastructure.
/// This is `None` if we are not in incremental compilation mode
pub on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
pub queries: &'tcx dyn query::QueryEngine<'tcx>,
pub query_system: query::QuerySystem<'tcx>,
pub(crate) query_kinds: &'tcx [DepKindStruct<'tcx>],
@ -674,9 +653,9 @@ pub fn create_global_ctxt(
hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
untracked: Untracked,
dep_graph: DepGraph,
on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
queries: &'tcx dyn query::QueryEngine<'tcx>,
on_disk_cache: Option<OnDiskCache<'tcx>>,
query_kinds: &'tcx [DepKindStruct<'tcx>],
query_system_fns: QuerySystemFns<'tcx>,
) -> GlobalCtxt<'tcx> {
let data_layout = s.target.parse_data_layout().unwrap_or_else(|err| {
s.emit_fatal(err);
@ -698,9 +677,7 @@ pub fn create_global_ctxt(
lifetimes: common_lifetimes,
consts: common_consts,
untracked,
on_disk_cache,
queries,
query_system: Default::default(),
query_system: QuerySystem::new(query_system_fns, on_disk_cache),
query_kinds,
ty_rcache: Default::default(),
pred_rcache: Default::default(),
@ -1039,7 +1016,7 @@ pub fn with_stable_hashing_context<R>(
}
pub fn serialize_query_result_cache(self, encoder: FileEncoder) -> FileEncodeResult {
self.on_disk_cache.as_ref().map_or(Ok(0), |c| c.serialize(self, encoder))
self.query_system.on_disk_cache.as_ref().map_or(Ok(0), |c| c.serialize(self, encoder))
}
/// If `true`, we should use lazy normalization for constants, otherwise

View File

@ -84,8 +84,7 @@
Const, ConstData, ConstInt, ConstKind, Expr, InferConst, ScalarInt, UnevaluatedConst, ValTree,
};
pub use self::context::{
tls, CtxtInterners, DeducedParamAttrs, FreeRegionInfo, GlobalCtxt, Lift, OnDiskCache, TyCtxt,
TyCtxtFeed,
tls, CtxtInterners, DeducedParamAttrs, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt, TyCtxtFeed,
};
pub use self::instance::{Instance, InstanceDef, ShortInstance, UnusedGenericParams};
pub use self::list::List;

View File

@ -1,6 +1,7 @@
#![allow(unused_parens)]
use crate::dep_graph;
use crate::dep_graph::DepKind;
use crate::infer::canonical::{self, Canonical};
use crate::lint::LintExpectation;
use crate::metadata::ModChild;
@ -17,7 +18,11 @@
};
use crate::mir::interpret::{LitToConstError, LitToConstInput};
use crate::mir::mono::CodegenUnit;
use crate::query::erase::{erase, restore, Erase};
use crate::query::on_disk_cache::CacheEncoder;
use crate::query::on_disk_cache::EncodedDepNodeIndex;
use crate::query::on_disk_cache::OnDiskCache;
use crate::query::{AsLocalKey, Key};
use crate::thir;
use crate::traits::query::{
@ -38,13 +43,16 @@
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::GeneratorDiagnosticData;
use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt, UnusedGenericParams};
use measureme::StringId;
use rustc_arena::TypedArena;
use rustc_ast as ast;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_attr as attr;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap, FxIndexSet};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::AtomicU64;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::sync::WorkerLocal;
use rustc_data_structures::unord::UnordSet;
@ -58,6 +66,7 @@
use rustc_hir::lang_items::{LangItem, LanguageItems};
use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
use rustc_index::IndexVec;
use rustc_query_system::ich::StableHashingContext;
pub(crate) use rustc_query_system::query::QueryJobId;
use rustc_query_system::query::*;
use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
@ -76,17 +85,70 @@
use std::path::PathBuf;
use std::sync::Arc;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_query_system::ich::StableHashingContext;
pub struct QueryKeyStringCache {
pub def_id_cache: FxHashMap<DefId, StringId>,
}
impl QueryKeyStringCache {
pub fn new() -> QueryKeyStringCache {
QueryKeyStringCache { def_id_cache: Default::default() }
}
}
#[derive(Clone, Copy)]
pub struct QueryStruct<'tcx> {
pub try_collect_active_jobs: fn(TyCtxt<'tcx>, &mut QueryMap<DepKind>) -> Option<()>,
pub alloc_self_profile_query_strings: fn(TyCtxt<'tcx>, &mut QueryKeyStringCache),
pub encode_query_results:
Option<fn(TyCtxt<'tcx>, &mut CacheEncoder<'_, 'tcx>, &mut EncodedDepNodeIndex)>,
}
pub struct QuerySystemFns<'tcx> {
pub engine: QueryEngine,
pub local_providers: Providers,
pub extern_providers: ExternProviders,
pub query_structs: Vec<QueryStruct<'tcx>>,
pub encode_query_results: fn(
tcx: TyCtxt<'tcx>,
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
),
pub try_mark_green: fn(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool,
}
#[derive(Default)]
pub struct QuerySystem<'tcx> {
pub states: QueryStates<'tcx>,
pub arenas: QueryArenas<'tcx>,
pub caches: QueryCaches<'tcx>,
/// This provides access to the incremental compilation on-disk cache for query results.
/// Do not access this directly. It is only meant to be used by
/// `DepGraph::try_mark_green()` and the query infrastructure.
/// This is `None` if we are not in incremental compilation mode
pub on_disk_cache: Option<OnDiskCache<'tcx>>,
pub fns: QuerySystemFns<'tcx>,
pub jobs: AtomicU64,
// Since we erase query value types, we tell the type system about them with `PhantomData`.
_phantom_values: QueryPhantomValues<'tcx>,
}
impl<'tcx> QuerySystem<'tcx> {
pub fn new(fns: QuerySystemFns<'tcx>, on_disk_cache: Option<OnDiskCache<'tcx>>) -> Self {
QuerySystem {
states: Default::default(),
arenas: Default::default(),
caches: Default::default(),
on_disk_cache,
fns,
jobs: AtomicU64::new(1),
_phantom_values: Default::default(),
}
}
}
#[derive(Copy, Clone)]
pub struct TyCtxtAt<'tcx> {
pub tcx: TyCtxt<'tcx>,
@ -136,7 +198,7 @@ pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
}
pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
self.queries.try_mark_green(self, dep_node)
(self.query_system.fns.try_mark_green)(self, dep_node)
}
}
@ -349,7 +411,7 @@ pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
match try_get_cached(self.tcx, &self.tcx.query_system.caches.$name, &key) {
Some(_) => return,
None => self.tcx.queries.$name(
None => (self.tcx.query_system.fns.engine.$name)(
self.tcx,
DUMMY_SP,
key,
@ -367,7 +429,7 @@ pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
match try_get_cached(self.tcx, &self.tcx.query_system.caches.$name, &key) {
Some(_) => return,
None => self.tcx.queries.$name(
None => (self.tcx.query_system.fns.engine.$name)(
self.tcx,
DUMMY_SP,
key,
@ -396,11 +458,22 @@ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V
restore::<$V>(match try_get_cached(self.tcx, &self.tcx.query_system.caches.$name, &key) {
Some(value) => value,
None => self.tcx.queries.$name(self.tcx, self.span, key, QueryMode::Get).unwrap(),
None => (self.tcx.query_system.fns.engine.$name)(
self.tcx,
self.span,
key, QueryMode::Get
).unwrap(),
})
})*
}
#[derive(Default)]
pub struct QueryStates<'tcx> {
$(
pub $name: QueryState<$($K)*, DepKind>,
)*
}
pub struct Providers {
$(pub $name: for<'tcx> fn(
TyCtxt<'tcx>,
@ -446,19 +519,13 @@ impl Clone for ExternProviders {
fn clone(&self) -> Self { *self }
}
pub trait QueryEngine<'tcx>: rustc_data_structures::sync::Sync {
fn as_any(&'tcx self) -> &'tcx dyn std::any::Any;
fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool;
$($(#[$attr])*
fn $name(
&'tcx self,
tcx: TyCtxt<'tcx>,
span: Span,
key: query_keys::$name<'tcx>,
mode: QueryMode,
) -> Option<Erase<$V>>;)*
pub struct QueryEngine {
$(pub $name: for<'tcx> fn(
TyCtxt<'tcx>,
Span,
query_keys::$name<'tcx>,
QueryMode,
) -> Option<Erase<$V>>,)*
}
};
}
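
This is the crux of the commit: the `QueryEngine` dyn trait (and the separate `OnDiskCache` trait) are replaced by plain structs of function pointers — `QueryEngine`, `QueryStruct`, and `QuerySystemFns` — that `rustc_query_impl` fills in and `rustc_middle` stores directly in `QuerySystem`. A minimal sketch of the struct-of-function-pointers pattern with made-up names (a `Compiler` context and two entries), editor's illustration rather than the real query plumbing:

// Editor's sketch, hypothetical names: instead of a `&dyn Engine` with
// virtual dispatch, the consumer stores a struct whose fields are plain
// `fn` pointers filled in by the crate that defines the implementations.
struct Compiler {
    fns: EngineFns,
}

struct EngineFns {
    type_of: fn(&Compiler, u32) -> String,
    try_mark_green: fn(&Compiler, u32) -> bool,
}

// "Provider" crate side: free functions with the right signatures.
fn type_of_impl(_c: &Compiler, key: u32) -> String {
    format!("Ty#{}", key)
}

fn try_mark_green_impl(_c: &Compiler, _dep_node: u32) -> bool {
    false
}

fn engine() -> EngineFns {
    EngineFns { type_of: type_of_impl, try_mark_green: try_mark_green_impl }
}

fn main() {
    let compiler = Compiler { fns: engine() };
    // Calling through a field that holds a `fn` pointer needs parentheses
    // around the field access, as in
    // `(self.query_system.fns.try_mark_green)(self, dep_node)` above.
    assert_eq!((compiler.fns.type_of)(&compiler, 7), "Ty#7");
    assert!(!(compiler.fns.try_mark_green)(&compiler, 0));
}

Compared with `&'tcx dyn QueryEngine<'tcx>`, this keeps the crate separation (the providers still live in `rustc_query_impl`) but drops the trait object, its `as_any` downcast, and the unsafe lifetime transmutes seen in the removed `QueryCtxt::from_tcx`.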

View File

@ -11,12 +11,10 @@
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
#[macro_use]
extern crate rustc_middle;
use rustc_data_structures::sync::AtomicU64;
use crate::plumbing::{encode_all_query_results, try_mark_green};
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::{self, DepKind, DepKindStruct};
use rustc_middle::query::erase::{erase, restore, Erase};
@ -24,7 +22,7 @@
use rustc_middle::ty::query::{
query_keys, query_provided, query_provided_to_value, query_storage, query_values,
};
use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine};
use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine, QuerySystemFns};
use rustc_middle::ty::TyCtxt;
use rustc_query_system::dep_graph::SerializedDepNodeIndex;
use rustc_query_system::Value;
@ -32,15 +30,10 @@
#[macro_use]
mod plumbing;
pub use plumbing::QueryCtxt;
use rustc_query_system::query::*;
#[cfg(parallel_compiler)]
pub use rustc_query_system::query::{deadlock, QueryContext};
pub use crate::plumbing::QueryCtxt;
pub use rustc_query_system::query::QueryConfig;
mod on_disk_cache;
pub use on_disk_cache::OnDiskCache;
use rustc_query_system::query::*;
mod profiling_support;
pub use self::profiling_support::alloc_self_profile_query_strings;
@ -54,9 +47,16 @@ trait QueryConfigRestored<'tcx>: QueryConfig<QueryCtxt<'tcx>> + Default {
rustc_query_append! { define_queries! }
impl<'tcx> Queries<'tcx> {
// Force codegen in the dyn-trait transformation in this crate.
pub fn as_dyn(&'tcx self) -> &'tcx dyn QueryEngine<'tcx> {
self
pub fn query_system_fns<'tcx>(
local_providers: Providers,
extern_providers: ExternProviders,
) -> QuerySystemFns<'tcx> {
QuerySystemFns {
engine: engine(),
local_providers,
extern_providers,
query_structs: make_dep_kind_array!(query_structs).to_vec(),
encode_query_results: encode_all_query_results,
try_mark_green: try_mark_green,
}
}

View File

@ -2,35 +2,44 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
use crate::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex};
use crate::profiling_support::QueryKeyStringCache;
use crate::{on_disk_cache, Queries};
use crate::rustc_middle::dep_graph::DepContext;
use crate::rustc_middle::ty::TyEncoder;
use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_errors::{Diagnostic, Handler};
use rustc_data_structures::sync::Lock;
use rustc_errors::Diagnostic;
use rustc_index::Idx;
use rustc_middle::dep_graph::{
self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex,
};
use rustc_middle::query::on_disk_cache::AbsoluteBytePos;
use rustc_middle::query::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex};
use rustc_middle::query::Key;
use rustc_middle::ty::tls::{self, ImplicitCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_query_system::dep_graph::{DepNodeParams, HasDepContext};
use rustc_query_system::ich::StableHashingContext;
use rustc_query_system::query::{
force_query, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, QueryStackFrame,
force_query, QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects,
QueryStackFrame,
};
use rustc_query_system::{LayoutOfDepth, QueryOverflow};
use rustc_serialize::Decodable;
use rustc_serialize::Encodable;
use rustc_session::Limit;
use rustc_span::def_id::LOCAL_CRATE;
use std::any::Any;
use std::num::NonZeroU64;
use thin_vec::ThinVec;
#[derive(Copy, Clone)]
pub struct QueryCtxt<'tcx> {
pub tcx: TyCtxt<'tcx>,
pub queries: &'tcx Queries<'tcx>,
}
impl<'tcx> QueryCtxt<'tcx> {
#[inline]
pub fn new(tcx: TyCtxt<'tcx>) -> Self {
QueryCtxt { tcx }
}
}
impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> {
@ -53,44 +62,56 @@ fn dep_context(&self) -> &Self::DepContext {
}
impl QueryContext for QueryCtxt<'_> {
#[inline]
fn next_job_id(self) -> QueryJobId {
QueryJobId(
NonZeroU64::new(
self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
self.query_system.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
)
.unwrap(),
)
}
#[inline]
fn current_query_job(self) -> Option<QueryJobId> {
tls::with_related_context(*self, |icx| icx.query)
tls::with_related_context(self.tcx, |icx| icx.query)
}
fn try_collect_active_jobs(self) -> Option<QueryMap<DepKind>> {
self.queries.try_collect_active_jobs(*self)
let mut jobs = QueryMap::default();
for query in &self.query_system.fns.query_structs {
(query.try_collect_active_jobs)(self.tcx, &mut jobs);
}
Some(jobs)
}
// Interactions with on_disk_cache
fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
self.queries
self.query_system
.on_disk_cache
.as_ref()
.map(|c| c.load_side_effects(*self, prev_dep_node_index))
.map(|c| c.load_side_effects(self.tcx, prev_dep_node_index))
.unwrap_or_default()
}
#[inline(never)]
#[cold]
fn store_side_effects(self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) {
if let Some(c) = self.queries.on_disk_cache.as_ref() {
if let Some(c) = self.query_system.on_disk_cache.as_ref() {
c.store_side_effects(dep_node_index, side_effects)
}
}
#[inline(never)]
#[cold]
fn store_side_effects_for_anon_node(
self,
dep_node_index: DepNodeIndex,
side_effects: QuerySideEffects,
) {
if let Some(c) = self.queries.on_disk_cache.as_ref() {
if let Some(c) = self.query_system.on_disk_cache.as_ref() {
c.store_side_effects_for_anon_node(dep_node_index, side_effects)
}
}
@ -109,14 +130,14 @@ fn start_query<R>(
// The `TyCtxt` stored in TLS has the same global interner lifetime
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
// when accessing the `ImplicitCtxt`.
tls::with_related_context(*self, move |current_icx| {
tls::with_related_context(self.tcx, move |current_icx| {
if depth_limit && !self.recursion_limit().value_within_limit(current_icx.query_depth) {
self.depth_limit_error(token);
}
// Update the `ImplicitCtxt` to point to our new query job.
let new_icx = ImplicitCtxt {
tcx: *self,
tcx: self.tcx,
query: Some(token),
diagnostics,
query_depth: current_icx.query_depth + depth_limit as usize,
@ -152,51 +173,20 @@ fn depth_limit_error(self, job: QueryJobId) {
}
}
impl<'tcx> QueryCtxt<'tcx> {
#[inline]
pub fn from_tcx(tcx: TyCtxt<'tcx>) -> Self {
let queries = tcx.queries.as_any();
let queries = unsafe {
let queries = std::mem::transmute::<&dyn Any, &dyn Any>(queries);
let queries = queries.downcast_ref().unwrap();
let queries = std::mem::transmute::<&Queries<'_>, &Queries<'_>>(queries);
queries
};
QueryCtxt { tcx, queries }
}
pub(crate) fn on_disk_cache(self) -> Option<&'tcx on_disk_cache::OnDiskCache<'tcx>> {
self.queries.on_disk_cache.as_ref()
}
pub(super) fn encode_query_results(
self,
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) {
for query in &self.queries.query_structs {
if let Some(encode) = query.encode_query_results {
encode(self, encoder, query_result_index);
}
}
}
pub fn try_print_query_stack(
self,
query: Option<QueryJobId>,
handler: &Handler,
num_frames: Option<usize>,
) -> usize {
rustc_query_system::query::print_query_stack(self, query, handler, num_frames)
}
pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
tcx.dep_graph.try_mark_green(QueryCtxt::new(tcx), dep_node).is_some()
}
#[derive(Clone, Copy)]
pub(crate) struct QueryStruct<'tcx> {
pub try_collect_active_jobs: fn(QueryCtxt<'tcx>, &mut QueryMap<DepKind>) -> Option<()>,
pub alloc_self_profile_query_strings: fn(TyCtxt<'tcx>, &mut QueryKeyStringCache),
pub encode_query_results:
Option<fn(QueryCtxt<'tcx>, &mut CacheEncoder<'_, 'tcx>, &mut EncodedDepNodeIndex)>,
pub(super) fn encode_all_query_results<'tcx>(
tcx: TyCtxt<'tcx>,
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) {
for query in &tcx.query_system.fns.query_structs {
if let Some(encode) = query.encode_query_results {
encode(tcx, encoder, query_result_index);
}
}
}
macro_rules! handle_cycle_error {
@ -276,13 +266,13 @@ macro_rules! hash_result {
macro_rules! call_provider {
([][$qcx:expr, $name:ident, $key:expr]) => {{
($qcx.queries.local_providers.$name)($qcx.tcx, $key)
($qcx.query_system.fns.local_providers.$name)($qcx, $key)
}};
([(separate_provide_extern) $($rest:tt)*][$qcx:expr, $name:ident, $key:expr]) => {{
if let Some(key) = $key.as_local_key() {
($qcx.queries.local_providers.$name)($qcx.tcx, key)
($qcx.query_system.fns.local_providers.$name)($qcx, key)
} else {
($qcx.queries.extern_providers.$name)($qcx.tcx, $key)
($qcx.query_system.fns.extern_providers.$name)($qcx, $key)
}
}};
([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
@ -306,7 +296,7 @@ pub(crate) fn create_query_frame<
'tcx,
K: Copy + Key + for<'a> HashStable<StableHashingContext<'a>>,
>(
tcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
do_describe: fn(TyCtxt<'tcx>, K) -> String,
key: K,
kind: DepKind,
@ -318,7 +308,7 @@ pub(crate) fn create_query_frame<
// Showing visible path instead of any path is not that important in production.
ty::print::with_no_visible_paths!(
// Force filename-line mode to avoid invoking `type_of` query.
ty::print::with_forced_impl_filename_line!(do_describe(tcx.tcx, key))
ty::print::with_forced_impl_filename_line!(do_describe(tcx, key))
)
);
let description =
@ -328,7 +318,7 @@ pub(crate) fn create_query_frame<
// so exit to avoid infinite recursion.
None
} else {
Some(key.default_span(*tcx))
Some(key.default_span(tcx))
};
let def_id = key.key_as_def_id();
let def_kind = if kind == dep_graph::DepKind::opt_def_kind {
@ -350,6 +340,34 @@ pub(crate) fn create_query_frame<
QueryStackFrame::new(description, span, def_id, def_kind, kind, ty_adt_id, hash)
}
pub(crate) fn encode_query_results<'a, 'tcx, Q>(
query: Q,
qcx: QueryCtxt<'tcx>,
encoder: &mut CacheEncoder<'a, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) where
Q: super::QueryConfigRestored<'tcx>,
Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>,
{
let _timer =
qcx.profiler().verbose_generic_activity_with_arg("encode_query_results_for", query.name());
assert!(query.query_state(qcx).all_inactive());
let cache = query.query_cache(qcx);
cache.iter(&mut |key, value, dep_node| {
if query.cache_on_disk(qcx.tcx, &key) {
let dep_node = SerializedDepNodeIndex::new(dep_node.index());
// Record position of the cache entry.
query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position())));
// Encode the type check tables with the `SerializedDepNodeIndex`
// as tag.
encoder.encode_tagged(dep_node, &Q::restore(*value));
}
});
}
fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode)
where
Q: QueryConfig<QueryCtxt<'tcx>>,
@ -364,8 +382,8 @@ fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: D
}
}
pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool {
if let Some(cache) = tcx.on_disk_cache().as_ref() {
pub(crate) fn loadable_from_disk<'tcx>(tcx: TyCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool {
if let Some(cache) = tcx.query_system.on_disk_cache.as_ref() {
cache.loadable_from_disk(id)
} else {
false
@ -373,13 +391,13 @@ pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNo
}
pub(crate) fn try_load_from_disk<'tcx, V>(
tcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
id: SerializedDepNodeIndex,
) -> Option<V>
where
V: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
{
tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id)
tcx.query_system.on_disk_cache.as_ref()?.try_load_query_result(tcx, id)
}
fn force_from_dep_node<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool
@ -407,8 +425,7 @@ fn force_from_dep_node<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode)
if let Some(key) = Q::Key::recover(tcx, &dep_node) {
#[cfg(debug_assertions)]
let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered();
let tcx = QueryCtxt::from_tcx(tcx);
force_query(query, tcx, key, dep_node);
force_query(query, QueryCtxt::new(tcx), key, dep_node);
true
} else {
false
@ -461,8 +478,33 @@ macro_rules! define_queries {
(
$($(#[$attr:meta])*
[$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
define_queries_struct! {
input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
mod get_query {
use super::*;
$(
#[inline(always)]
#[tracing::instrument(level = "trace", skip(tcx))]
pub(super) fn $name<'tcx>(
tcx: TyCtxt<'tcx>,
span: Span,
key: query_keys::$name<'tcx>,
mode: QueryMode,
) -> Option<Erase<query_values::$name<'tcx>>> {
get_query(
queries::$name::default(),
QueryCtxt::new(tcx),
span,
key,
mode
)
}
)*
}
pub(crate) fn engine() -> QueryEngine {
QueryEngine {
$($name: get_query::$name,)*
}
}
#[allow(nonstandard_style)]
@ -502,7 +544,7 @@ fn cache_on_disk(self, tcx: TyCtxt<'tcx>, key: &Self::Key) -> bool {
fn query_state<'a>(self, tcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key, crate::dep_graph::DepKind>
where QueryCtxt<'tcx>: 'a
{
&tcx.queries.$name
&tcx.query_system.states.$name
}
#[inline(always)]
@ -521,7 +563,7 @@ fn execute_query(self, tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
fn compute(self, qcx: QueryCtxt<'tcx>, key: Self::Key) -> Self::Value {
query_provided_to_value::$name(
qcx.tcx,
call_provider!([$($modifiers)*][qcx, $name, key])
call_provider!([$($modifiers)*][qcx.tcx, $name, key])
)
}
@ -535,7 +577,7 @@ fn try_load_from_disk(
if ::rustc_middle::query::cached::$name(_qcx.tcx, _key) {
Some(|qcx: QueryCtxt<'tcx>, dep_node| {
let value = $crate::plumbing::try_load_from_disk::<query_provided::$name<'tcx>>(
qcx,
qcx.tcx,
dep_node
);
value.map(|value| query_provided_to_value::$name(qcx.tcx, value))
@ -557,7 +599,7 @@ fn loadable_from_disk(
) -> bool {
should_ever_cache_on_disk!([$($modifiers)*] {
self.cache_on_disk(_qcx.tcx, _key) &&
$crate::plumbing::loadable_from_disk(_qcx, _index)
$crate::plumbing::loadable_from_disk(_qcx.tcx, _index)
} {
false
})
@ -684,14 +726,13 @@ pub fn CompileMonoItem<'tcx>() -> DepKindStruct<'tcx> {
}
mod query_structs {
use rustc_middle::ty::TyCtxt;
use $crate::plumbing::{QueryStruct, QueryCtxt};
use $crate::profiling_support::QueryKeyStringCache;
use rustc_query_system::query::QueryMap;
use super::*;
use rustc_middle::ty::query::QueryStruct;
use rustc_middle::ty::query::QueryKeyStringCache;
use rustc_middle::dep_graph::DepKind;
pub(super) const fn dummy_query_struct<'tcx>() -> QueryStruct<'tcx> {
fn noop_try_collect_active_jobs(_: QueryCtxt<'_>, _: &mut QueryMap<DepKind>) -> Option<()> {
fn noop_try_collect_active_jobs(_: TyCtxt<'_>, _: &mut QueryMap<DepKind>) -> Option<()> {
None
}
fn noop_alloc_self_profile_query_strings(_: TyCtxt<'_>, _: &mut QueryKeyStringCache) {}
@ -717,7 +758,7 @@ pub(super) const fn $name<'tcx>() -> QueryStruct<'tcx> { QueryStruct {
let name = stringify!($name);
$crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name)
};
tcx.queries.$name.try_collect_active_jobs(
tcx.query_system.states.$name.try_collect_active_jobs(
tcx,
make_query,
qmap,
@ -731,10 +772,10 @@ pub(super) const fn $name<'tcx>() -> QueryStruct<'tcx> { QueryStruct {
string_cache,
)
},
encode_query_results: expand_if_cached!([$($modifiers)*], |qcx, encoder, query_result_index|
$crate::on_disk_cache::encode_query_results::<super::queries::$name<'tcx>>(
encode_query_results: expand_if_cached!([$($modifiers)*], |tcx, encoder, query_result_index|
$crate::plumbing::encode_query_results::<super::queries::$name<'tcx>>(
super::queries::$name::default(),
qcx,
QueryCtxt::new(tcx),
encoder,
query_result_index,
)
@ -747,93 +788,3 @@ pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct<'
}
}
}
use crate::{ExternProviders, OnDiskCache, Providers};
impl<'tcx> Queries<'tcx> {
pub fn new(
local_providers: Providers,
extern_providers: ExternProviders,
on_disk_cache: Option<OnDiskCache<'tcx>>,
) -> Self {
use crate::query_structs;
Queries {
local_providers: Box::new(local_providers),
extern_providers: Box::new(extern_providers),
query_structs: make_dep_kind_array!(query_structs).to_vec(),
on_disk_cache,
jobs: AtomicU64::new(1),
..Queries::default()
}
}
}
macro_rules! define_queries_struct {
(
input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
#[derive(Default)]
pub struct Queries<'tcx> {
local_providers: Box<Providers>,
extern_providers: Box<ExternProviders>,
query_structs: Vec<$crate::plumbing::QueryStruct<'tcx>>,
pub on_disk_cache: Option<OnDiskCache<'tcx>>,
jobs: AtomicU64,
$(
$(#[$attr])*
$name: QueryState<
<queries::$name<'tcx> as QueryConfig<QueryCtxt<'tcx>>>::Key,
rustc_middle::dep_graph::DepKind,
>,
)*
}
impl<'tcx> Queries<'tcx> {
pub(crate) fn try_collect_active_jobs(
&'tcx self,
tcx: TyCtxt<'tcx>,
) -> Option<QueryMap<rustc_middle::dep_graph::DepKind>> {
let tcx = QueryCtxt { tcx, queries: self };
let mut jobs = QueryMap::default();
for query in &self.query_structs {
(query.try_collect_active_jobs)(tcx, &mut jobs);
}
Some(jobs)
}
}
impl<'tcx> QueryEngine<'tcx> for Queries<'tcx> {
fn as_any(&'tcx self) -> &'tcx dyn std::any::Any {
let this = unsafe { std::mem::transmute::<&Queries<'_>, &Queries<'_>>(self) };
this as _
}
fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
let qcx = QueryCtxt { tcx, queries: self };
tcx.dep_graph.try_mark_green(qcx, dep_node).is_some()
}
$($(#[$attr])*
#[inline(always)]
#[tracing::instrument(level = "trace", skip(self, tcx))]
fn $name(
&'tcx self,
tcx: TyCtxt<'tcx>,
span: Span,
key: query_keys::$name<'tcx>,
mode: QueryMode,
) -> Option<Erase<query_values::$name<'tcx>>> {
let qcx = QueryCtxt { tcx, queries: self };
get_query(
queries::$name::default(),
qcx,
span,
key,
mode
)
})*
}
};
}
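
With the trait object gone, `QueryCtxt` (defined earlier in this file's diff) is just a `Copy` wrapper around `TyCtxt` that implements `Deref`, so query-system state is reached as `self.query_system` while explicit `TyCtxt` uses become `self.tcx`. A tiny sketch of that Copy-newtype-with-Deref shape using hypothetical types (editor's illustration):

use std::ops::Deref;

#[derive(Copy, Clone)]
struct Ctxt<'a> {
    inner: &'a Inner,
}

struct Inner {
    threads: usize,
}

impl<'a> Deref for Ctxt<'a> {
    type Target = Inner;

    fn deref(&self) -> &Inner {
        self.inner
    }
}

fn main() {
    let inner = Inner { threads: 8 };
    let ctxt = Ctxt { inner: &inner };
    // Field access goes through `Deref`, so `ctxt.threads` resolves even
    // though `threads` lives on `Inner`, much like `self.query_system`
    // resolving through `QueryCtxt`'s deref to `TyCtxt` in the diff.
    assert_eq!(ctxt.threads, 8);
}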

View File

@ -1,24 +1,13 @@
use crate::QueryCtxt;
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
use rustc_middle::ty::query::QueryKeyStringCache;
use rustc_middle::ty::TyCtxt;
use rustc_query_system::query::QueryCache;
use std::fmt::Debug;
use std::io::Write;
pub(crate) struct QueryKeyStringCache {
def_id_cache: FxHashMap<DefId, StringId>,
}
impl QueryKeyStringCache {
fn new() -> QueryKeyStringCache {
QueryKeyStringCache { def_id_cache: Default::default() }
}
}
struct QueryKeyStringBuilder<'p, 'tcx> {
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
@ -253,9 +242,8 @@ pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
}
let mut string_cache = QueryKeyStringCache::new();
let queries = QueryCtxt::from_tcx(tcx);
for query in &queries.queries.query_structs {
for query in &tcx.query_system.fns.query_structs {
(query.alloc_self_profile_query_strings)(tcx, &mut string_cache);
}
}

View File

@ -1190,6 +1190,7 @@ pub fn must_emit_unwind_tables(&self) -> bool {
/// Returns the number of query threads that should be used for this
/// compilation
#[inline]
pub fn threads(&self) -> usize {
self.opts.unstable_opts.threads
}