Remove conditional use of Sharded from query state
parent b60e31b673
commit b74cb78d63
@@ -12,12 +12,13 @@
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
@@ -30,10 +31,7 @@
 use super::QueryConfig;
 
 pub struct QueryState<K, D: DepKind> {
-    #[cfg(parallel_compiler)]
     active: Sharded<FxHashMap<K, QueryResult<D>>>,
-    #[cfg(not(parallel_compiler))]
-    active: Lock<FxHashMap<K, QueryResult<D>>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
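
After this hunk, `QueryState::active` is a `Sharded` map for the parallel and non-parallel compiler alike; the point of the commit is that callers no longer need `#[cfg(parallel_compiler)]` / `#[cfg(not(parallel_compiler))]` pairs, because `Sharded` can itself stand in for a plain lock when only one shard is in play. Below is a minimal, self-contained sketch of such a type, written against std only. It is not the `rustc_data_structures::sharded` implementation: the shard count, `Mutex`/`HashMap` in place of `Lock`/`FxHashMap`, and the demo in `main` are illustrative assumptions; only the three methods this diff calls (`get_shard_by_value`, `lock_shards`, `try_lock_shards`) mirror the interface being used.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::{Mutex, MutexGuard, TryLockError};

// Illustrative choice; a real implementation could collapse this to 1 when
// the compiler is not built for parallelism.
const SHARD_COUNT: usize = 4;

/// Illustrative stand-in for `rustc_data_structures::sharded::Sharded`.
pub struct Sharded<T> {
    shards: Vec<Mutex<T>>,
}

impl<T: Default> Sharded<T> {
    pub fn new() -> Self {
        Sharded { shards: (0..SHARD_COUNT).map(|_| Mutex::new(T::default())).collect() }
    }
}

impl<T> Sharded<T> {
    /// Pick the shard owning `val`, so unrelated keys can be locked independently.
    pub fn get_shard_by_value<K: Hash>(&self, val: &K) -> &Mutex<T> {
        let mut hasher = DefaultHasher::new();
        val.hash(&mut hasher);
        &self.shards[hasher.finish() as usize % self.shards.len()]
    }

    /// Lock every shard, e.g. to check a global property such as `all_inactive`.
    pub fn lock_shards(&self) -> Vec<MutexGuard<'_, T>> {
        self.shards.iter().map(|s| s.lock().unwrap()).collect()
    }

    /// Non-blocking variant: `None` if any shard is already held, which is
    /// exactly what a deadlock handler needs.
    pub fn try_lock_shards(&self) -> Option<Vec<MutexGuard<'_, T>>> {
        self.shards
            .iter()
            .map(|s| match s.try_lock() {
                Ok(guard) => Some(guard),
                Err(TryLockError::WouldBlock) => None,
                Err(TryLockError::Poisoned(e)) => Some(e.into_inner()),
            })
            .collect()
    }
}

fn main() {
    let active: Sharded<HashMap<u32, &'static str>> = Sharded::new();
    active.get_shard_by_value(&1).lock().unwrap().insert(1, "started");
    let all_inactive = active.lock_shards().iter().all(|shard| shard.is_empty());
    println!("all_inactive = {all_inactive}"); // prints `false`
}

Using the shard vector's own length for the modulus means the same code works whether there is one shard or many, which is what lets the `#[cfg]` pairs in the remaining hunks disappear.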
@@ -52,15 +50,8 @@ impl<K, D> QueryState<K, D>
     D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.active.lock_shards();
-            shards.iter().all(|shard| shard.is_empty())
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            self.active.lock().is_empty()
-        }
+        let shards = self.active.lock_shards();
+        shards.iter().all(|shard| shard.is_empty())
     }
 
     pub fn try_collect_active_jobs<Qcx: Copy>(
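
With a single shard, the new body of `all_inactive` degenerates to the old `#[cfg(not(parallel_compiler))]` arm: `lock_shards()` yields one guard, and `all(|shard| shard.is_empty())` is then just `is_empty()` on that one map. Spelled out against the hypothetical sketch above (not rustc's real types):

// Uses the illustrative `Sharded` from the sketch above.
fn all_inactive<K, V>(active: &Sharded<std::collections::HashMap<K, V>>) -> bool {
    // One shard:   equivalent to the old `self.active.lock().is_empty()`.
    // Many shards: every shard's map must be empty.
    let shards = active.lock_shards();
    shards.iter().all(|shard| shard.is_empty())
}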
@@ -71,26 +62,11 @@ pub fn try_collect_active_jobs<Qcx: Copy>(
     ) -> Option<()> {
         let mut active = Vec::new();
 
-        #[cfg(parallel_compiler)]
-        {
-            // We use try_lock_shards here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            let shards = self.active.try_lock_shards()?;
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    if let QueryResult::Started(ref job) = *v {
-                        active.push((*k, job.clone()));
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            // We use try_lock here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
-            // really hurt much.)
-            for (k, v) in self.active.try_lock()?.iter() {
+        // We use try_lock_shards here since we are called from the
+        // deadlock handler, and this shouldn't be locked.
+        let shards = self.active.try_lock_shards()?;
+        for shard in shards.iter() {
+            for (k, v) in shard.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     active.push((*k, job.clone()));
                 }
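
The collapsed `try_collect_active_jobs` keeps the non-blocking `try_lock_shards()?`: it runs from the deadlock handler, so it must never wait on a shard lock that a deadlocked thread may already hold, and instead gives up with `None`. A sketch of that pattern on the illustrative types above (the `JobState` enum and its `String` payload are assumptions standing in for rustc's `QueryResult`/`QueryJob`):

// Stand-in for the per-key `QueryResult`; only `Started` carries a job here.
enum JobState {
    Started(String), // rustc stores a QueryJob, not a String
    #[allow(dead_code)]
    Poisoned,
}

// Collect started jobs without ever blocking: if any shard is already locked
// (possibly by a deadlocked thread), bail out with `None`.
fn try_collect_active(
    active: &Sharded<std::collections::HashMap<u32, JobState>>,
) -> Option<Vec<(u32, String)>> {
    let mut jobs = Vec::new();
    let shards = active.try_lock_shards()?;
    for shard in shards.iter() {
        for (k, v) in shard.iter() {
            if let JobState::Started(ref job) = *v {
                jobs.push((*k, job.clone()));
            }
        }
    }
    Some(jobs)
}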
@@ -184,10 +160,7 @@ fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            #[cfg(parallel_compiler)]
             let mut lock = state.active.get_shard_by_value(&key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut lock = state.active.lock();
             match lock.remove(&key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -209,10 +182,7 @@ fn drop(&mut self) {
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            #[cfg(parallel_compiler)]
             let mut shard = state.active.get_shard_by_value(&self.key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut shard = state.active.lock();
             let job = match shard.remove(&self.key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -325,10 +295,7 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    #[cfg(parallel_compiler)]
     let mut state_lock = state.active.get_shard_by_value(&key).lock();
-    #[cfg(not(parallel_compiler))]
-    let mut state_lock = state.active.lock();
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
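
The last three hunks follow one pattern: `complete`, the poison-on-panic `Drop` impl, and `try_execute_query` now always lock the shard that owns the key via `get_shard_by_value(&key).lock()`, so an entry is inserted, inspected, and removed under the same per-shard lock; as the preserved comment says, checking the cache and the active map while holding that lock is what rules out racing with a query that has already completed or already started. A condensed claim/complete sketch of that lifecycle, reusing the hypothetical `Sharded` and `JobState` from the sketches above:

// Claim `key` if nobody started it yet, entirely under the shard lock;
// the same shape as the check in `try_execute_query`.
fn try_claim(active: &Sharded<std::collections::HashMap<u32, JobState>>, key: u32) -> bool {
    let mut lock = active.get_shard_by_value(&key).lock().unwrap();
    if lock.contains_key(&key) {
        return false; // already started (or poisoned) by someone else
    }
    lock.insert(key, JobState::Started(String::from("job")));
    true
}

// Mirrors `complete` and the Drop impl: take the same shard lock and remove the entry.
fn finish(active: &Sharded<std::collections::HashMap<u32, JobState>>, key: u32) -> JobState {
    let mut lock = active.get_shard_by_value(&key).lock().unwrap();
    lock.remove(&key).expect("query was never started")
}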