diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 4adb4eb7475..74b8aaf4f27 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -12,12 +12,13 @@
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
@@ -30,10 +31,7 @@
 use super::QueryConfig;
 
 pub struct QueryState<K, D: DepKind> {
-    #[cfg(parallel_compiler)]
     active: Sharded<FxHashMap<K, QueryResult<D>>>,
-    #[cfg(not(parallel_compiler))]
-    active: Lock<FxHashMap<K, QueryResult<D>>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
@@ -52,15 +50,8 @@ impl<K, D> QueryState<K, D>
     D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.active.lock_shards();
-            shards.iter().all(|shard| shard.is_empty())
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            self.active.lock().is_empty()
-        }
+        let shards = self.active.lock_shards();
+        shards.iter().all(|shard| shard.is_empty())
     }
 
     pub fn try_collect_active_jobs<Qcx: Copy>(
@@ -71,26 +62,11 @@ pub fn try_collect_active_jobs<Qcx: Copy>(
     ) -> Option<()> {
         let mut active = Vec::new();
-        #[cfg(parallel_compiler)]
-        {
-            // We use try_lock_shards here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            let shards = self.active.try_lock_shards()?;
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    if let QueryResult::Started(ref job) = *v {
-                        active.push((*k, job.clone()));
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            // We use try_lock here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
-            // really hurt much.)
-            for (k, v) in self.active.try_lock()?.iter() {
+        // We use try_lock_shards here since we are called from the
+        // deadlock handler, and this shouldn't be locked.
+        let shards = self.active.try_lock_shards()?;
+        for shard in shards.iter() {
+            for (k, v) in shard.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     active.push((*k, job.clone()));
                 }
             }
@@ -184,10 +160,7 @@ fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            #[cfg(parallel_compiler)]
             let mut lock = state.active.get_shard_by_value(&key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut lock = state.active.lock();
             match lock.remove(&key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -209,10 +182,7 @@ fn drop(&mut self) {
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            #[cfg(parallel_compiler)]
             let mut shard = state.active.get_shard_by_value(&self.key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut shard = state.active.lock();
             let job = match shard.remove(&self.key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -325,10 +295,7 @@ fn try_execute_query(
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    #[cfg(parallel_compiler)]
     let mut state_lock = state.active.get_shard_by_value(&key).lock();
-    #[cfg(not(parallel_compiler))]
-    let mut state_lock = state.active.lock();
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
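Note on the `Sharded` API the patch leans on: the sketch below is a minimal, self-contained stand-in, not the real `rustc_data_structures::sharded::Sharded` (which lives in `rustc_data_structures` and handles the non-parallel build internally). It only mimics the three methods the diff calls, `get_shard_by_value`, `lock_shards`, and `try_lock_shards`, on top of `std::sync::Mutex` and `HashMap`, with a made-up hash-based shard selection and arbitrary shard counts. The point it illustrates is why the `#[cfg(parallel_compiler)]` / `#[cfg(not(parallel_compiler))]` pairs can be deleted: a one-shard instance behaves like the old `Lock<FxHashMap<_, _>>` path, while a multi-shard instance behaves like the old sharded path, so a single code path can serve both builds.

```rust
// Minimal standalone sketch of the `Sharded` pattern used above.
// NOT the real rustc_data_structures implementation; it only mirrors the
// three methods the diff relies on so the unified code path is easy to follow.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::{Mutex, MutexGuard, TryLockError};

struct Sharded<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Hash + Eq, V> Sharded<K, V> {
    /// `shard_count = 1` models the serial compiler; a larger power of two
    /// models the parallel compiler. Callers use the same API either way.
    fn new(shard_count: usize) -> Self {
        assert!(shard_count.is_power_of_two());
        Sharded { shards: (0..shard_count).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    /// Pick the shard that owns `key` by hashing it, then lock only that shard.
    fn get_shard_by_value(&self, key: &K) -> MutexGuard<'_, HashMap<K, V>> {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        let idx = (hasher.finish() as usize) & (self.shards.len() - 1);
        self.shards[idx].lock().unwrap()
    }

    /// Lock every shard, e.g. to check that all of them are empty.
    fn lock_shards(&self) -> Vec<MutexGuard<'_, HashMap<K, V>>> {
        self.shards.iter().map(|s| s.lock().unwrap()).collect()
    }

    /// Non-blocking variant for contexts (like a deadlock handler) where
    /// blocking on a shard lock would be unsafe; returns `None` if any shard
    /// is currently held by another thread.
    fn try_lock_shards(&self) -> Option<Vec<MutexGuard<'_, HashMap<K, V>>>> {
        self.shards
            .iter()
            .map(|s| match s.try_lock() {
                Ok(guard) => Some(guard),
                Err(TryLockError::WouldBlock) => None,
                Err(TryLockError::Poisoned(e)) => Some(e.into_inner()),
            })
            .collect()
    }
}

fn main() {
    // One call site works for both configurations, which is what lets the
    // patch delete the #[cfg(parallel_compiler)] / #[cfg(not(...))] pairs.
    for shard_count in [1, 32] {
        let active: Sharded<u32, &str> = Sharded::new(shard_count);
        active.get_shard_by_value(&7).insert(7, "started");
        let all_inactive = active.lock_shards().iter().all(|s| s.is_empty());
        // No shard is held at this point, so the non-blocking variant succeeds.
        assert!(active.try_lock_shards().is_some());
        println!("shards={shard_count} all_inactive={all_inactive}");
    }
}
```

The `try_lock_shards` variant mirrors the comment kept in `try_collect_active_jobs`: the deadlock handler must never block on a shard lock, so the collection bails out with `None` instead of waiting.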