Allow the QueryCache to specify storage.

Camille GILLOT 2020-03-27 18:41:13 +01:00
parent fb5615a477
commit e8ef41e83f
5 changed files with 62 additions and 39 deletions
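The crux of the change, spelled out in the cache-trait hunks below, is to split a new QueryStorage trait out of QueryCache, so the cache itself decides which type callers receive (`Stored`), independently of the type the query provider computes (`Value`). A minimal sketch of the resulting shape, simplified from the diff and not the exact upstream definitions:

// Sketch only: condensed from the cache-trait hunks shown below.
pub trait QueryStorage: Default {
    type Value;
    type Stored: Clone;

    /// Store a value without putting it in the cache.
    /// This is meant to be used with cycle errors.
    fn store_nocache(&self, value: Self::Value) -> Self::Stored;
}

pub trait QueryCache: QueryStorage {
    type Key: std::hash::Hash;
    type Sharded: Default;
    // lookup() hands out &Self::Stored on a hit, and complete() now returns
    // Self::Stored instead of (); see the full hunks below.
}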


@@ -328,6 +328,10 @@ pub struct $name<$tcx> {
$(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
type Key = $($K)*;
type Value = $V;
type Stored = <
query_storage!([$($modifiers)*][$($K)*, $V])
as QueryStorage
>::Stored;
const NAME: &'static str = stringify!($name);
const CATEGORY: ProfileCategory = $category;
}
@@ -426,8 +430,10 @@ pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V {
self.at(DUMMY_SP).$name(key)
pub fn $name(self, key: query_helper_param_ty!($($K)*))
-> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
{
self.at(DUMMY_SP).$name(key.into_query_param())
})*
/// All self-profiling events generated by the query engine use
@@ -463,7 +469,9 @@ pub fn alloc_self_profile_query_strings(self) {
impl TyCtxtAt<$tcx> {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V {
pub fn $name(self, key: query_helper_param_ty!($($K)*))
-> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
{
get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
})*
}
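For query consumers, the generated accessors on TyCtxt and TyCtxtAt above now return the query's `Stored` type rather than `$V` directly. Roughly what the expansion amounts to for a single query; the query name `example_query` and the `DefId` key are purely illustrative, and with the default storage `Stored` is the same type as before:

// Illustrative expansion for a hypothetical query (not one taken from the
// macro input); only the return type changes compared to the old expansion.
impl<'tcx> TyCtxt<'tcx> {
    pub fn example_query(self, key: DefId)
        -> <queries::example_query<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Stored
    {
        self.at(DUMMY_SP).example_query(key.into_query_param())
    }
}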


@@ -8,13 +8,21 @@
use std::hash::Hash;
use std::marker::PhantomData;
pub trait CacheSelector<K: Hash, V> {
type Cache: QueryCache<Key = K, Value = V>;
pub trait CacheSelector<K, V> {
type Cache;
}
pub trait QueryCache: Default {
type Key: Hash;
pub trait QueryStorage: Default {
type Value;
type Stored: Clone;
/// Store a value without putting it in the cache.
/// This is meant to be used with cycle errors.
fn store_nocache(&self, value: Self::Value) -> Self::Stored;
}
pub trait QueryCache: QueryStorage {
type Key: Hash;
type Sharded: Default;
/// Checks if the query is already computed and in the cache.
@@ -30,7 +38,7 @@ fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
on_miss: OnMiss,
) -> R
where
OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R,
OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R;
fn complete<CTX: QueryContext>(
@@ -40,7 +48,7 @@ fn complete<CTX: QueryContext>(
key: Self::Key,
value: Self::Value,
index: DepNodeIndex,
);
) -> Self::Stored;
fn iter<R, L>(
&self,
@@ -66,9 +74,18 @@ fn default() -> Self {
}
}
impl<K: Eq + Hash, V: Clone> QueryStorage for DefaultCache<K, V> {
type Value = V;
type Stored = V;
fn store_nocache(&self, value: Self::Value) -> Self::Stored {
// We have no dedicated storage
value
}
}
impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
type Key = K;
type Value = V;
type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
#[inline(always)]
@@ -99,8 +116,9 @@ fn complete<CTX: QueryContext>(
key: K,
value: V,
index: DepNodeIndex,
) {
lock_sharded_storage.insert(key, (value, index));
) -> Self::Stored {
lock_sharded_storage.insert(key, (value.clone(), index));
value
}
fn iter<R, L>(

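DefaultCache keeps the old behaviour: `Stored = Value`, `store_nocache` is the identity, and `complete` clones the value into the shard while handing the original back. The reason for introducing `Stored` at all is to let other storages hand out something cheaper than a clone. A hypothetical storage of that kind, not part of this commit, might look like the following sketch, which uses `Box::leak` purely to stay self-contained where a real storage would use an arena:

// Hypothetical storage whose Stored type is a shared reference, so handing a
// result to a caller never deep-clones the value.
struct RefCache<K, V> {
    _marker: std::marker::PhantomData<(K, V)>,
}

impl<K, V> Default for RefCache<K, V> {
    fn default() -> Self {
        RefCache { _marker: std::marker::PhantomData }
    }
}

impl<K: Eq + std::hash::Hash, V: 'static> QueryStorage for RefCache<K, V> {
    type Value = V;
    type Stored = &'static V; // Clone (in fact Copy), without cloning V

    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        // Stand-in for an arena allocation tied to the compiler session.
        Box::leak(Box::new(value))
    }
}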

@@ -20,7 +20,8 @@ pub trait QueryConfig<CTX> {
const CATEGORY: ProfileCategory;
type Key: Eq + Hash + Clone + Debug;
type Value: Clone;
type Value;
type Stored: Clone;
}
pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
@@ -28,7 +29,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
const EVAL_ALWAYS: bool;
const DEP_KIND: CTX::DepKind;
type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>;


@@ -7,7 +7,7 @@
pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
mod caches;
pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache};
pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage};
mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};


@@ -148,7 +148,6 @@ struct JobOwner<'tcx, CTX: QueryContext, C>
where
C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
state: &'tcx QueryState<CTX, C>,
key: C::Key,
@@ -159,7 +158,6 @@ impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
where
C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
/// Either gets a `JobOwner` corresponding to the query, allowing us to
/// start executing the query, or returns with the result of the query.
@@ -177,7 +175,7 @@ fn try_start<'a, 'b, Q>(
mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
) -> TryGetJob<'b, CTX, C>
where
Q: QueryDescription<CTX, Key = C::Key, Value = C::Value, Cache = C>,
Q: QueryDescription<CTX, Key = C::Key, Stored = C::Stored, Value = C::Value, Cache = C>,
CTX: QueryContext,
{
let lock = &mut *lookup.lock;
@@ -229,7 +227,8 @@ fn try_start<'a, 'b, Q>(
// so we just return the error.
#[cfg(not(parallel_compiler))]
return TryGetJob::Cycle(cold_path(|| {
Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
let value = Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span));
Q::query_state(tcx).cache.store_nocache(value)
}));
// With parallel queries we might just have to wait on some other
@@ -239,7 +238,9 @@ fn try_start<'a, 'b, Q>(
let result = latch.wait_on(tcx, span);
if let Err(cycle) = result {
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
let value = Q::handle_cycle_error(tcx, cycle);
let value = Q::query_state(tcx).cache.store_nocache(value);
return TryGetJob::Cycle(value);
}
let cached = try_get_cached(
@@ -261,7 +262,7 @@ fn try_start<'a, 'b, Q>(
/// Completes the query by updating the query cache with the `result`,
/// signals the waiter and forgets the JobOwner, so it won't poison the query
#[inline(always)]
fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
fn complete(self, tcx: CTX, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
let state = self.state;
@@ -269,18 +270,18 @@ fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
// Forget ourself so our destructor won't poison the query
mem::forget(self);
let job = {
let result = result.clone();
let (job, result) = {
let mut lock = state.shards.get_shard_by_value(&key).lock();
let job = match lock.active.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
};
state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
job
let result = state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
(job, result)
};
job.signal_complete();
result
}
}
@@ -297,7 +298,6 @@ fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
where
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
#[inline(never)]
#[cold]
@@ -331,7 +331,6 @@ pub struct CycleError<Q> {
enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
where
C::Key: Eq + Hash + Clone + Debug,
C::Value: Clone,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
NotYetStarted(JobOwner<'tcx, CTX, C>),
@@ -340,10 +339,10 @@ enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
/// Returns the result of the query and its dep-node index
/// if it succeeded or a cycle error if it failed.
#[cfg(parallel_compiler)]
JobCompleted((C::Value, DepNodeIndex)),
JobCompleted((C::Stored, DepNodeIndex)),
/// Trying to execute the query resulted in a cycle.
Cycle(C::Value),
Cycle(C::Stored),
}
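Both result-carrying variants of TryGetJob now hold `C::Stored`, because whatever reaches the caller must be in the stored form. For cycle errors the recovery value never enters the cache, which is exactly what `store_nocache` is for; condensed from the try_start hunks above into a free-standing sketch:

// The cycle-recovery value is only converted for the caller, never cached.
fn stored_cycle_value<S: QueryStorage>(storage: &S, recovery_value: S::Value) -> S::Stored {
    storage.store_nocache(recovery_value)
}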
/// Checks if the query is already computed and in the cache.
@@ -362,7 +361,7 @@ fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
where
C: QueryCache,
CTX: QueryContext,
OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
{
state.cache.lookup(
@@ -388,7 +387,7 @@ fn try_execute_query<Q, CTX>(
span: Span,
key: Q::Key,
lookup: QueryLookup<'_, CTX, Q::Key, <Q::Cache as QueryCache>::Sharded>,
) -> Q::Value
) -> Q::Stored
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
@@ -427,9 +426,7 @@ fn try_execute_query<Q, CTX>(
tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}
job.complete(tcx, &result, dep_node_index);
return result;
return job.complete(tcx, result, dep_node_index);
}
let dep_node = Q::to_dep_node(tcx, &key);
@@ -454,8 +451,7 @@ fn try_execute_query<Q, CTX>(
})
});
if let Some((result, dep_node_index)) = loaded {
job.complete(tcx, &result, dep_node_index);
return result;
return job.complete(tcx, result, dep_node_index);
}
}
@@ -558,7 +554,7 @@ fn force_query_with_job<Q, CTX>(
key: Q::Key,
job: JobOwner<'_, CTX, Q::Cache>,
dep_node: DepNode<CTX::DepKind>,
) -> (Q::Value, DepNodeIndex)
) -> (Q::Stored, DepNodeIndex)
where
Q: QueryDescription<CTX>,
CTX: QueryContext,
@@ -603,13 +599,13 @@ fn force_query_with_job<Q, CTX>(
}
}
job.complete(tcx, &result, dep_node_index);
let result = job.complete(tcx, result, dep_node_index);
(result, dep_node_index)
}
#[inline(never)]
pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Value
pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Stored
where
Q: QueryDescription<CTX>,
CTX: QueryContext,