From e240783a4d22c1e56b101ab230bee0b821065bd5 Mon Sep 17 00:00:00 2001
From: Mark Rousskov <mark.simulacrum@gmail.com>
Date: Mon, 7 Feb 2022 11:03:51 -0500
Subject: [PATCH] Switch QueryJobId to a single global counter

This replaces the per-shard counters with a single global counter,
simplifying the JobId struct down to just a u64 and removing the need to
pipe a DepKind generic through a bunch of code.

The performance implications on non-parallel compilers are likely
minimal: the backing storage switches from a `u64` to a `Cell<u64>`, but
the former was already inside a `RefCell`, so this is not a significant
divergence. On parallel compilers, the cost of a single global u64
counter may be more significant, since in theory it adds a serialization
point. On the other hand, the counter could be given a thread-local
component (or some similar structure) if that ever becomes a concern.
The new design is sufficiently simpler that it warrants the potential
for slight changes down the line if/when parallel compilation becomes
more of a default.

A u64 counter, instead of u32 (the old per-shard width), is chosen to
avoid overflow problems: it is effectively impossible to overflow a u64
counter in this context.
---
 compiler/rustc_middle/src/ty/context.rs       |   4 +-
 compiler/rustc_query_impl/src/lib.rs          |   1 +
 compiler/rustc_query_impl/src/plumbing.rs     |  33 ++--
 .../rustc_query_system/src/query/config.rs    |   2 +-
 compiler/rustc_query_system/src/query/job.rs  | 155 ++++++------------
 compiler/rustc_query_system/src/query/mod.rs  |  10 +-
 .../rustc_query_system/src/query/plumbing.rs  |  89 ++++------
 7 files changed, 116 insertions(+), 178 deletions(-)

diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 5dae4b9e4c9..273d6771907 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -1668,7 +1668,7 @@ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
 
 pub mod tls {
     use super::{ptr_eq, GlobalCtxt, TyCtxt};
 
-    use crate::dep_graph::{DepKind, TaskDepsRef};
+    use crate::dep_graph::TaskDepsRef;
     use crate::ty::query;
     use rustc_data_structures::sync::{self, Lock};
     use rustc_data_structures::thin_vec::ThinVec;
@@ -1693,7 +1693,7 @@ pub struct ImplicitCtxt<'a, 'tcx> {
 
     /// The current query job, if any. This is updated by `JobOwner::start` in
     /// `ty::query::plumbing` when executing a query.
-    pub query: Option<query::QueryJobId<DepKind>>,
+    pub query: Option<query::QueryJobId>,
 
     /// Where to store diagnostics for the current query job, if any.
     /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index bf859decc70..f6bce748679 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -15,6 +15,7 @@ extern crate rustc_middle;
 
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::AtomicU64;
 use rustc_middle::arena::Arena;
 use rustc_middle::dep_graph::{self, DepKindStruct, SerializedDepNodeIndex};
 use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index 6d76d09f619..326ba999bc3 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -3,7 +3,7 @@
 //! manage the caches, and so forth.
 
 use crate::{on_disk_cache, Queries};
-use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::TyCtxt;
 use rustc_query_system::dep_graph::HasDepContext;
@@ -15,6 +15,7 @@
 use rustc_serialize::opaque;
 
 use std::any::Any;
+use std::num::NonZeroU64;
 
 #[derive(Copy, Clone)]
 pub struct QueryCtxt<'tcx> {
@@ -42,11 +43,20 @@ fn dep_context(&self) -> &Self::DepContext {
 }
 
 impl QueryContext for QueryCtxt<'_> {
-    fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
+    fn next_job_id(&self) -> QueryJobId {
+        QueryJobId(
+            NonZeroU64::new(
+                self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
+            )
+            .unwrap(),
+        )
+    }
+
+    fn current_query_job(&self) -> Option<QueryJobId> {
         tls::with_related_context(**self, |icx| icx.query)
     }
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>> {
+    fn try_collect_active_jobs(&self) -> Option<QueryMap> {
         self.queries.try_collect_active_jobs(**self)
     }
 
@@ -81,7 +91,7 @@ fn store_side_effects_for_anon_node(
     #[inline(always)]
     fn start_query<R>(
         &self,
-        token: QueryJobId<Self::DepKind>,
+        token: QueryJobId,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R {
@@ -152,7 +162,7 @@ macro_rules! encode_queries {
 
     pub fn try_print_query_stack(
         self,
-        query: Option<QueryJobId<DepKind>>,
+        query: Option<QueryJobId>,
         handler: &Handler,
         num_frames: Option<usize>,
     ) -> usize {
@@ -320,7 +330,7 @@ impl<$tcx> QueryDescription<QueryCtxt<$tcx>> for queries::$name<$tcx> {
             type Cache = query_storage::$name<$tcx>;
 
             #[inline(always)]
-            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Self::Key>
+            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<Self::Key>
                 where QueryCtxt<$tcx>: 'a
             {
                 &tcx.queries.$name
@@ -471,10 +481,9 @@ pub struct Queries<$tcx> {
 
             pub on_disk_cache: Option<OnDiskCache<$tcx>>,
 
-            $($(#[$attr])*  $name: QueryState<
-                crate::dep_graph::DepKind,
-                query_keys::$name<$tcx>,
-            >,)*
+            jobs: AtomicU64,
+
+            $($(#[$attr])*  $name: QueryState<query_keys::$name<$tcx>>,)*
         }
 
         impl<$tcx> Queries<$tcx> {
@@ -487,6 +496,7 @@ pub fn new(
                     local_providers: Box::new(local_providers),
                     extern_providers: Box::new(extern_providers),
                     on_disk_cache,
+                    jobs: AtomicU64::new(1),
                     $($name: Default::default()),*
                 }
             }
@@ -494,14 +504,13 @@ pub fn new(
             pub(crate) fn try_collect_active_jobs(
                 &$tcx self,
                 tcx: TyCtxt<$tcx>,
-            ) -> Option<QueryMap<crate::dep_graph::DepKind>> {
+            ) -> Option<QueryMap> {
                 let tcx = QueryCtxt { tcx, queries: self };
                 let mut jobs = QueryMap::default();
 
                 $(
                     self.$name.try_collect_active_jobs(
                         tcx,
-                        dep_graph::DepKind::$name,
                         make_query::$name,
                         &mut jobs,
                     )?;
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index d2b102b6f89..b1ff1e15a9d 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -59,7 +59,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
     fn describe(tcx: CTX, key: Self::Key) -> String;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, Self::Key>
+    fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
     where
         CTX: 'a;
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index bd673030992..adf878a7f04 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -7,13 +7,11 @@
 use rustc_session::Session;
 use rustc_span::Span;
 
-use std::convert::TryFrom;
 use std::hash::Hash;
-use std::num::NonZeroU32;
+use std::num::NonZeroU64;
 
 #[cfg(parallel_compiler)]
 use {
-    crate::dep_graph::DepKind,
     parking_lot::{Condvar, Mutex},
     rustc_data_structures::fx::FxHashSet,
     rustc_data_structures::sync::Lock,
@@ -33,80 +31,57 @@ pub struct QueryInfo {
     pub query: QueryStackFrame,
 }
 
-pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
-
-/// A value uniquely identifying an active query job within a shard in the query cache.
-#[derive(Copy, Clone, Eq, PartialEq, Hash)]
-pub struct QueryShardJobId(pub NonZeroU32);
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
 
 /// A value uniquely identifying an active query job.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
-pub struct QueryJobId<D> {
-    /// Which job within a shard is this
-    pub job: QueryShardJobId,
-
-    /// In which shard is this job
-    pub shard: u16,
-
-    /// What kind of query this job is.
-    pub kind: D,
-}
-
-impl<D> QueryJobId<D>
-where
-    D: Copy + Clone + Eq + Hash,
-{
-    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
-        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
-    }
+pub struct QueryJobId(pub NonZeroU64);
 
-    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
+impl QueryJobId {
+    fn query(self, map: &QueryMap) -> QueryStackFrame {
         map.get(&self).unwrap().query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span(self, map: &QueryMap<D>) -> Span {
+    fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
+    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
+    fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
-pub struct QueryJobInfo<D> {
+pub struct QueryJobInfo {
     pub query: QueryStackFrame,
-    pub job: QueryJob<D>,
+    pub job: QueryJob,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D> {
-    pub id: QueryShardJobId,
+pub struct QueryJob {
+    pub id: QueryJobId,
 
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
 
     /// The parent query job which created this job and is implicitly waiting on it.
-    pub parent: Option<QueryJobId<D>>,
+    pub parent: Option<QueryJobId>,
 
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D>>,
+    latch: Option<QueryLatch>,
 }
 
-impl<D> QueryJob<D>
-where
-    D: Copy + Clone + Eq + Hash,
-{
+impl QueryJob {
     /// Creates a new query job.
-    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
+    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
         QueryJob {
             id,
             span,
@@ -117,7 +92,7 @@ pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -139,16 +114,13 @@ pub fn signal_complete(self) {
 }
 
 #[cfg(not(parallel_compiler))]
-impl<D> QueryJobId<D>
-where
-    D: Copy + Clone + Eq + Hash,
-{
+impl QueryJobId {
     #[cold]
     #[inline(never)]
     pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D>,
-        current_job: &Option<QueryJobId<D>>,
+        query_map: QueryMap,
+        current_job: &Option<QueryJobId>,
         span: Span,
     ) -> CycleError {
         // Find the waitee amongst `current_job` parents
@@ -184,15 +156,15 @@ pub(super) fn find_cycle_in_stack(
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<D> {
-    query: Option<QueryJobId<D>>,
+struct QueryWaiter {
+    query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
     cycle: Lock<Option<CycleError>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D> QueryWaiter<D> {
+impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -200,34 +172,27 @@ fn notify(&self, registry: &rayon_core::Registry) {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D> {
+struct QueryLatchInfo {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<D>>>,
+    waiters: Vec<Lrc<QueryWaiter>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D> {
-    info: Lrc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+    info: Lrc<Mutex<QueryLatchInfo>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: Eq + Hash> QueryLatch<D> {
+impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
             info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
         }
     }
-}
 
-#[cfg(parallel_compiler)]
-impl<D> QueryLatch<D> {
     /// Awaits for the query job to complete.
-    pub(super) fn wait_on(
-        &self,
-        query: Option<QueryJobId<D>>,
-        span: Span,
-    ) -> Result<(), CycleError> {
+    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
         let waiter =
             Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -242,7 +207,7 @@ pub(super) fn wait_on(
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -276,7 +241,7 @@ fn set(&self) {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -286,7 +251,7 @@ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
 
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
 #[cfg(parallel_compiler)]
-type Waiter<D> = (QueryJobId<D>, usize);
+type Waiter = (QueryJobId, usize);
 
 /// Visits all the non-resumable and resumable waiters of a query.
 /// Only waiters in a query are visited.
@@ -298,14 +263,9 @@ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<D, F>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId<D>,
-    mut visit: F,
-) -> Option<Option<Waiter<D>>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
-    D: Copy + Clone + Eq + Hash,
-    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
+    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
     if let Some(parent) = query.parent(query_map) {
@@ -334,16 +294,13 @@ fn visit_waiters(
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId<D>,
+fn cycle_check(
+    query_map: &QueryMap,
+    query: QueryJobId,
     span: Span,
-    stack: &mut Vec<(Span, QueryJobId<D>)>,
-    visited: &mut FxHashSet<QueryJobId<D>>,
-) -> Option<Option<Waiter<D>>>
-where
-    D: Copy + Clone + Eq + Hash,
-{
+    stack: &mut Vec<(Span, QueryJobId)>,
+    visited: &mut FxHashSet<QueryJobId>,
+) -> Option<Option<Waiter>> {
     if !visited.insert(query) {
         return if let Some(p) = stack.iter().position(|q| q.1 == query) {
             // We detected a query cycle, fix up the initial span and return Some
@@ -378,14 +335,11 @@ fn cycle_check(
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId<D>,
-    visited: &mut FxHashSet<QueryJobId<D>>,
-) -> bool
-where
-    D: Copy + Clone + Eq + Hash,
-{
+fn connected_to_root(
+    query_map: &QueryMap,
+    query: QueryJobId,
+    visited: &mut FxHashSet<QueryJobId>,
+) -> bool {
     // We already visited this or we're deliberately ignoring it
     if !visited.insert(query) {
         return false;
@@ -404,10 +358,9 @@ fn connected_to_root(
 
 // Deterministically pick an query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
-    D: Copy + Clone + Eq + Hash,
-    F: Fn(&T) -> (Span, QueryJobId<D>),
+    F: Fn(&T) -> (Span, QueryJobId),
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
@@ -431,10 +384,10 @@ fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
 #[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
-    query_map: &QueryMap<D>,
-    jobs: &mut Vec<QueryJobId<D>>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
+fn remove_cycle(
+    query_map: &QueryMap,
+    jobs: &mut Vec<QueryJobId>,
+    wakelist: &mut Vec<Lrc<QueryWaiter>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -489,7 +442,7 @@ fn remove_cycle(
             }
         }
     })
-    .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
+    .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
 
     // Deterministically pick an entry point
     let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
@@ -544,7 +497,7 @@ pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
     let mut wakelist = Vec::new();
     let query_map = tcx.try_collect_active_jobs().unwrap();
 
-    let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();
+    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
 
     let mut found_cycle = false;
 
@@ -630,7 +583,7 @@ pub(crate) fn report_cycle<'a>(
 
 pub fn print_query_stack<CTX: QueryContext>(
     tcx: CTX,
-    mut current_query: Option<QueryJobId<CTX::DepKind>>,
+    mut current_query: Option<QueryJobId>,
     handler: &Handler,
     num_frames: Option<usize>,
 ) -> usize {
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index a2f7843baaa..361ae3c4352 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -117,10 +117,12 @@ pub fn append(&mut self, other: QuerySideEffects) {
 }
 
 pub trait QueryContext: HasDepContext {
-    /// Get the query information from the TLS context.
-    fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
+    fn next_job_id(&self) -> QueryJobId;
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
+    /// Get the query information from the TLS context.
+    fn current_query_job(&self) -> Option<QueryJobId>;
+
+    fn try_collect_active_jobs(&self) -> Option<QueryMap>;
 
     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
@@ -140,7 +142,7 @@ fn store_side_effects_for_anon_node(
     /// captured during execution and the actual result.
     fn start_query<R>(
         &self,
-        token: QueryJobId<Self::DepKind>,
+        token: QueryJobId,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index da1f3617647..77e1fd3f2cc 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -5,9 +5,7 @@
 use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable};
-use crate::query::job::{
-    report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
-};
+use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
@@ -24,7 +22,6 @@
 use std::fmt::Debug;
 use std::hash::{Hash, Hasher};
 use std::mem;
-use std::num::NonZeroU32;
 use std::ptr;
 
 pub struct QueryCacheStore<C: QueryCache> {
@@ -69,36 +66,32 @@ pub fn iter_results(&self, f: &mut dyn FnMut(&C::Key, &C::Value, DepNodeIndex))
     }
 }
 
-struct QueryStateShard<D, K> {
-    active: FxHashMap<K, QueryResult<D>>,
-
-    /// Used to generate unique ids for active jobs.
-    jobs: u32,
+struct QueryStateShard<K> {
+    active: FxHashMap<K, QueryResult>,
 }
 
-impl<D, K> Default for QueryStateShard<D, K> {
-    fn default() -> QueryStateShard<D, K> {
-        QueryStateShard { active: Default::default(), jobs: 0 }
+impl<K> Default for QueryStateShard<K> {
+    fn default() -> QueryStateShard<K> {
+        QueryStateShard { active: Default::default() }
     }
 }
 
-pub struct QueryState<D, K> {
-    shards: Sharded<QueryStateShard<D, K>>,
+pub struct QueryState<K> {
+    shards: Sharded<QueryStateShard<K>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D> {
+enum QueryResult {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D>),
+    Started(QueryJob),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<D, K> QueryState<D, K>
+impl<K> QueryState<K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone + Debug,
 {
     pub fn all_inactive(&self) -> bool {
@@ -109,19 +102,17 @@ pub fn all_inactive(&self) -> bool {
     pub fn try_collect_active_jobs<CTX: Copy>(
         &self,
         tcx: CTX,
-        kind: D,
         make_query: fn(CTX, K) -> QueryStackFrame,
-        jobs: &mut QueryMap<D>,
+        jobs: &mut QueryMap,
     ) -> Option<()> {
         // We use try_lock_shards here since we are called from the
         // deadlock handler, and this shouldn't be locked.
         let shards = self.shards.try_lock_shards()?;
-        for (shard_id, shard) in shards.iter().enumerate() {
+        for shard in shards.iter() {
             for (k, v) in shard.active.iter() {
                 if let QueryResult::Started(ref job) = *v {
-                    let id = QueryJobId::new(job.id, shard_id, kind);
                     let query = make_query(tcx, k.clone());
-                    jobs.insert(id, QueryJobInfo { query, job: job.clone() });
+                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                 }
             }
         }
@@ -130,22 +121,21 @@ pub fn try_collect_active_jobs(
     }
 }
 
-impl<D, K> Default for QueryState<D, K> {
-    fn default() -> QueryState<D, K> {
+impl<K> Default for QueryState<K> {
+    fn default() -> QueryState<K> {
         QueryState { shards: Default::default() }
     }
 }
 
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, D, K>
+struct JobOwner<'tcx, K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone,
 {
-    state: &'tcx QueryState<D, K>,
+    state: &'tcx QueryState<K>,
     key: K,
-    id: QueryJobId<D>,
+    id: QueryJobId,
 }
 
 #[cold]
 #[inline(never)]
 fn mk_cycle<CTX, V, R>(
@@ -166,9 +156,8 @@ fn mk_cycle(
     cache.store_nocache(value)
 }
 
-impl<'tcx, D, K> JobOwner<'tcx, D, K>
+impl<'tcx, K> JobOwner<'tcx, K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone,
 {
     /// Either gets a `JobOwner` corresponding the query, allowing us to
@@ -182,12 +171,11 @@ impl<'tcx, D, K> JobOwner<'tcx, D, K>
     #[inline(always)]
     fn try_start<'b, CTX>(
         tcx: &'b CTX,
-        state: &'b QueryState<CTX::DepKind, K>,
+        state: &'b QueryState<K>,
         span: Span,
         key: K,
         lookup: QueryLookup,
-        dep_kind: CTX::DepKind,
-    ) -> TryGetJob<'b, CTX::DepKind, K>
+    ) -> TryGetJob<'b, K>
     where
         CTX: QueryContext,
     {
@@ -197,27 +185,21 @@ fn try_start<'b, CTX>(
 
         match lock.active.entry(key) {
             Entry::Vacant(entry) => {
-                // Generate an id unique within this shard.
-                let id = lock.jobs.checked_add(1).unwrap();
-                lock.jobs = id;
-                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
-
+                let id = tcx.next_job_id();
                 let job = tcx.current_query_job();
                 let job = QueryJob::new(id, span, job);
                 let key = entry.key().clone();
                 entry.insert(QueryResult::Started(job));
 
-                let global_id = QueryJobId::new(id, shard, dep_kind);
-
-                let owner = JobOwner { state, id: global_id, key };
+                let owner = JobOwner { state, id, key };
                 return TryGetJob::NotYetStarted(owner);
             }
             Entry::Occupied(mut entry) => {
                 match entry.get_mut() {
                     #[cfg(not(parallel_compiler))]
                     QueryResult::Started(job) => {
-                        let id = QueryJobId::new(job.id, shard, dep_kind);
-
+                        let id = job.id;
                         drop(state_lock);
 
                         // If we are single-threaded we know that we have cycle error,
@@ -295,9 +277,8 @@ fn complete(
     }
 }
 
-impl<'tcx, D, K> Drop for JobOwner<'tcx, D, K>
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone,
 {
     #[inline(never)]
@@ -329,13 +310,12 @@ pub(crate) struct CycleError {
 }
 
 /// The result of `try_start`.
-enum TryGetJob<'tcx, D, K>
+enum TryGetJob<'tcx, K>
 where
-    D: Copy + Clone + Eq + Hash,
     K: Eq + Hash + Clone,
 {
     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
-    NotYetStarted(JobOwner<'tcx, D, K>),
+    NotYetStarted(JobOwner<'tcx, K>),
 
     /// The query was already completed.
     /// Returns the result of the query and its dep-node index
@@ -375,7 +355,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>(
 
 fn try_execute_query<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, C::Key>,
+    state: &QueryState<C::Key>,
     cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
@@ -388,14 +368,7 @@ fn try_execute_query(
     C::Key: Clone + DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
-    match JobOwner::<'_, CTX::DepKind, C::Key>::try_start(
-        &tcx,
-        state,
-        span,
-        key.clone(),
-        lookup,
-        query.dep_kind,
-    ) {
+    match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone(), lookup) {
         TryGetJob::NotYetStarted(job) => {
             let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
             let result = job.complete(cache, result, dep_node_index);
@@ -427,7 +400,7 @@ fn execute_job(
     tcx: CTX,
     key: K,
     mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
    query: &QueryVtable<CTX, K, V>,
-    job_id: QueryJobId<CTX::DepKind>,
+    job_id: QueryJobId,
 ) -> (V, DepNodeIndex)
 where
     K: Clone + DepNodeParams<CTX::DepContext>,
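
Note on the allocation scheme: the counter starts at 1 and `fetch_add`
returns the *pre*-increment value, so the value handed to `NonZeroU64`
is never zero. The following is a minimal standalone sketch of the same
idea as `Queries::jobs` plus `next_job_id` above; it uses plain `std`
atomics rather than `rustc_data_structures::sync`, and the names are
illustrative only, not the rustc code:

    use std::num::NonZeroU64;
    use std::sync::atomic::{AtomicU64, Ordering};

    // Start at 1 so the pre-increment value returned by `fetch_add`
    // is never zero, keeping the `NonZeroU64` constructor happy.
    static JOBS: AtomicU64 = AtomicU64::new(1);

    #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
    struct QueryJobId(NonZeroU64);

    fn next_job_id() -> QueryJobId {
        // Relaxed suffices: the ids only need to be unique; the counter
        // imposes no ordering requirements on any other memory.
        QueryJobId(NonZeroU64::new(JOBS.fetch_add(1, Ordering::Relaxed)).unwrap())
    }

    fn main() {
        let a = next_job_id();
        let b = next_job_id();
        assert_ne!(a, b); // every caller observes a distinct id
        println!("{a:?} {b:?}");
    }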
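
On the choice of u64 over the old per-shard u32: even at an assumed
(and unrealistically high) rate of one billion job ids per second, a
u32 counter wraps after roughly four seconds, while a u64 lasts for
centuries. A quick check of that arithmetic, with the rate as the only
assumption:

    fn main() {
        let per_sec: u128 = 1_000_000_000; // assumed allocation rate
        let u32_secs = u32::MAX as u128 / per_sec; // ~4 seconds
        let u64_years = u64::MAX as u128 / per_sec / 31_557_600; // ~584 years
        println!("u32 exhausted in ~{u32_secs}s; u64 lasts ~{u64_years} years");
    }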
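
The message above leaves room for a thread-local component should the
single serialization point ever show up in parallel-compiler profiles.
One hypothetical shape for that, sketched here and not part of this
patch: each thread reserves a block of ids from the global counter and
hands them out locally, paying one contended read-modify-write per
block instead of per job. Ids stay globally unique and non-zero, at the
cost of gaps when a thread exits with part of its block unused:

    use std::cell::Cell;
    use std::num::NonZeroU64;
    use std::sync::atomic::{AtomicU64, Ordering};

    const BLOCK: u64 = 1 << 16;
    static GLOBAL: AtomicU64 = AtomicU64::new(1);

    thread_local! {
        // (next id to hand out, first id past the reserved block)
        static LOCAL: Cell<(u64, u64)> = Cell::new((0, 0));
    }

    fn next_job_id() -> NonZeroU64 {
        LOCAL.with(|l| {
            let (mut next, mut end) = l.get();
            if next == end {
                // Refill: one contended RMW per BLOCK allocations.
                next = GLOBAL.fetch_add(BLOCK, Ordering::Relaxed);
                end = next + BLOCK;
            }
            l.set((next + 1, end));
            NonZeroU64::new(next).unwrap()
        })
    }

    fn main() {
        assert_ne!(next_job_id(), next_job_id());
    }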