Return a Result for query cache.

Camille GILLOT 2020-10-23 22:34:32 +02:00
parent f8ab649dfd
commit 9f46259a75
2 changed files with 68 additions and 87 deletions
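Summary of the change: `QueryCache::lookup` (and the `try_get_cached` helper in the plumbing) no longer takes an `on_miss` continuation. It keeps only `on_hit` and returns `Result<R, QueryLookup<..>>`, where the `Err` variant carries the shard lookup so that each call site drives the miss path itself. The sketch below illustrates the shape of the refactor on a deliberately simplified cache; `Cache`, `MissGuard`, `complete` and `get_or_compute` are stand-ins invented for illustration, not the compiler's `QueryCache`/`QueryLookup` API, and all the real details (hashing, sharding, locking, job ownership) are omitted.

    // Illustration only: simplified stand-ins, no sharding or locking.
    use std::collections::HashMap;
    use std::hash::Hash;

    struct Cache<K, V> {
        map: HashMap<K, V>,
    }

    // Stand-in for QueryLookup: on a miss it keeps hold of the storage so the
    // caller can complete the computation and insert the value.
    struct MissGuard<'a, K, V> {
        map: &'a mut HashMap<K, V>,
        key: K,
    }

    impl<'a, K: Hash + Eq, V> MissGuard<'a, K, V> {
        // Stand-in for the "compute the query and cache it" path.
        fn complete(self, value: V) -> &'a V {
            self.map.entry(self.key).or_insert(value)
        }
    }

    impl<K: Hash + Eq + Clone, V> Cache<K, V> {
        // Roughly the old shape: the cache receives both continuations and
        // decides which one to call.
        fn lookup_cps<R>(
            &mut self,
            key: K,
            on_hit: impl FnOnce(&V) -> R,
            on_miss: impl FnOnce(MissGuard<'_, K, V>) -> R,
        ) -> R {
            if let Some(value) = self.map.get(&key) {
                on_hit(value)
            } else {
                on_miss(MissGuard { map: &mut self.map, key })
            }
        }

        // Roughly the new shape: only the hit continuation remains, and a miss
        // is reported through `Err` so the caller runs the slow path itself.
        fn lookup<'s, R>(
            &'s mut self,
            key: &K,
            on_hit: impl FnOnce(&V) -> R,
        ) -> Result<R, MissGuard<'s, K, V>> {
            if let Some(value) = self.map.get(key) {
                Ok(on_hit(value))
            } else {
                Err(MissGuard { map: &mut self.map, key: key.clone() })
            }
        }
    }

    // Caller side with the new shape, mirroring the `match cached { .. }`
    // pattern that get_query_impl uses in the diff below.
    fn get_or_compute(cache: &mut Cache<u32, String>, key: u32) -> String {
        match cache.lookup(&key, |v| v.clone()) {
            Ok(hit) => hit,
            Err(miss) => miss.complete(format!("computed {}", key)).clone(),
        }
    }

    fn main() {
        let mut cache = Cache { map: HashMap::new() };

        // Old shape: the miss path is a closure handed to the cache.
        let a = cache.lookup_cps(1, |v| v.clone(), |miss| miss.complete("one".to_string()).clone());

        // New shape: the miss path is the caller's own `match` on the Result.
        let b = get_or_compute(&mut cache, 2);

        println!("{} {}", a, b);
    }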

View File

@@ -31,17 +31,15 @@ pub trait QueryCache: QueryStorage {
     /// It returns the shard index and a lock guard to the shard,
     /// which will be used if the query is not in the cache and we need
     /// to compute it.
-    fn lookup<D, Q, R, OnHit, OnMiss>(
+    fn lookup<'s, D, Q, R, OnHit>(
         &self,
-        state: &QueryState<D, Q, Self>,
-        key: Self::Key,
+        state: &'s QueryState<D, Q, Self>,
+        key: &Self::Key,
         // `on_hit` can be called while holding a lock to the query state shard.
         on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
+    ) -> Result<R, QueryLookup<'s, D, Q, Self::Key, Self::Sharded>>
     where
-        OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
-        OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;
+        OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;

     fn complete(
         &self,
@@ -95,23 +93,24 @@ where
     type Sharded = FxHashMap<K, (V, DepNodeIndex)>;

     #[inline(always)]
-    fn lookup<D, Q, R, OnHit, OnMiss>(
+    fn lookup<'s, D, Q, R, OnHit>(
         &self,
-        state: &QueryState<D, Q, Self>,
-        key: K,
+        state: &'s QueryState<D, Q, Self>,
+        key: &K,
         on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
+    ) -> Result<R, QueryLookup<'s, D, Q, K, Self::Sharded>>
     where
         OnHit: FnOnce(&V, DepNodeIndex) -> R,
-        OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
     {
-        let mut lookup = state.get_lookup(&key);
-        let lock = &mut *lookup.lock;
-
-        let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
+        let lookup = state.get_lookup(key);
+        let result = lookup.lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, key);

-        if let Some((_, value)) = result { on_hit(&value.0, value.1) } else { on_miss(key, lookup) }
+        if let Some((_, value)) = result {
+            let hit_result = on_hit(&value.0, value.1);
+            Ok(hit_result)
+        } else {
+            Err(lookup)
+        }
     }

     #[inline]
@@ -177,26 +176,23 @@ where
     type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;

     #[inline(always)]
-    fn lookup<D, Q, R, OnHit, OnMiss>(
+    fn lookup<'s, D, Q, R, OnHit>(
         &self,
-        state: &QueryState<D, Q, Self>,
-        key: K,
+        state: &'s QueryState<D, Q, Self>,
+        key: &K,
         on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
+    ) -> Result<R, QueryLookup<'s, D, Q, K, Self::Sharded>>
     where
         OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
-        OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
     {
-        let mut lookup = state.get_lookup(&key);
-        let lock = &mut *lookup.lock;
-
-        let result = lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
+        let lookup = state.get_lookup(key);
+        let result = lookup.lock.cache.raw_entry().from_key_hashed_nocheck(lookup.key_hash, key);

         if let Some((_, value)) = result {
-            on_hit(&&value.0, value.1)
+            let hit_result = on_hit(&&value.0, value.1);
+            Ok(hit_result)
         } else {
-            on_miss(key, lookup)
+            Err(lookup)
         }
     }

View File

@@ -248,13 +248,8 @@ where
                 return TryGetJob::Cycle(value);
             }

-            let cached = try_get_cached(
-                tcx,
-                state,
-                (*key).clone(),
-                |value, index| (value.clone(), index),
-                |_, _| panic!("value must be in cache after waiting"),
-            );
+            let cached = try_get_cached(tcx, state, key, |value, index| (value.clone(), index))
+                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

             if let Some(prof_timer) = _query_blocked_prof_timer.take() {
                 prof_timer.finish_with_query_invocation_id(cached.1.into());
@@ -356,35 +351,28 @@ where
 /// It returns the shard index and a lock guard to the shard,
 /// which will be used if the query is not in the cache and we need
 /// to compute it.
-fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
+fn try_get_cached<'a, CTX, C, R, OnHit>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C>,
-    key: C::Key,
+    state: &'a QueryState<CTX::DepKind, CTX::Query, C>,
+    key: &C::Key,
     // `on_hit` can be called while holding a lock to the query cache
     on_hit: OnHit,
-    on_miss: OnMiss,
-) -> R
+) -> Result<R, QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>>
 where
     C: QueryCache,
     CTX: QueryContext,
     OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
-    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
 {
-    state.cache.lookup(
-        state,
-        key,
-        |value, index| {
-            if unlikely!(tcx.profiler().enabled()) {
-                tcx.profiler().query_cache_hit(index.into());
-            }
-            #[cfg(debug_assertions)]
-            {
-                state.cache_hits.fetch_add(1, Ordering::Relaxed);
-            }
-            on_hit(value, index)
-        },
-        on_miss,
-    )
+    state.cache.lookup(state, &key, |value, index| {
+        if unlikely!(tcx.profiler().enabled()) {
+            tcx.profiler().query_cache_hit(index.into());
+        }
+        #[cfg(debug_assertions)]
+        {
+            state.cache_hits.fetch_add(1, Ordering::Relaxed);
+        }
+        on_hit(value, index)
+    })
 }

 fn try_execute_query<CTX, C>(
@@ -626,16 +614,14 @@ where
     C: QueryCache,
     C::Key: crate::dep_graph::DepNodeParams<CTX>,
 {
-    try_get_cached(
-        tcx,
-        state,
-        key,
-        |value, index| {
-            tcx.dep_graph().read_index(index);
-            value.clone()
-        },
-        |key, lookup| try_execute_query(tcx, state, span, key, lookup, query),
-    )
+    let cached = try_get_cached(tcx, state, &key, |value, index| {
+        tcx.dep_graph().read_index(index);
+        value.clone()
+    });
+    match cached {
+        Ok(value) => value,
+        Err(lookup) => try_execute_query(tcx, state, span, key, lookup, query),
+    }
 }

 /// Ensure that either this query has all green inputs or been executed.
@@ -694,25 +680,24 @@ fn force_query_impl<CTX, C>(
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    try_get_cached(
-        tcx,
-        state,
-        key,
-        |_, _| {
-            // Cache hit, do nothing
-        },
-        |key, lookup| {
-            let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
-                tcx, state, span, &key, lookup, query,
-            ) {
-                TryGetJob::NotYetStarted(job) => job,
-                TryGetJob::Cycle(_) => return,
-                #[cfg(parallel_compiler)]
-                TryGetJob::JobCompleted(_) => return,
-            };
-            force_query_with_job(tcx, key, job, dep_node, query);
-        },
-    );
+    let cached = try_get_cached(tcx, state, &key, |_, _| {
+        // Cache hit, do nothing
+    });
+
+    let lookup = match cached {
+        Ok(()) => return,
+        Err(lookup) => lookup,
+    };
+
+    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+        tcx, state, span, &key, lookup, query,
+    ) {
+        TryGetJob::NotYetStarted(job) => job,
+        TryGetJob::Cycle(_) => return,
+        #[cfg(parallel_compiler)]
+        TryGetJob::JobCompleted(_) => return,
+    };
+    force_query_with_job(tcx, key, job, dep_node, query);
 }

 pub enum QueryMode {
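A side note on the call sites above: with the Result-based signature, each caller picks its own miss policy. The minimal sketch below uses a plain `Result` with hypothetical `Lookup`/`run_query`/`lookup_sketch` stand-ins (not the real query machinery) to show the two policies used in this commit: matching on `Err` to run the slow path, and `unwrap_or_else` with a panic where a hit is guaranteed because a concurrent job has just finished.

    // Hypothetical stand-ins: `Lookup` plays the role of QueryLookup and
    // `run_query` the role of try_execute_query.
    struct Lookup;

    fn lookup_sketch(hit: bool) -> Result<u32, Lookup> {
        if hit { Ok(42) } else { Err(Lookup) }
    }

    fn run_query(_lookup: Lookup) -> u32 {
        // compute and cache the value on a miss
        7
    }

    // get_query_impl / force_query_impl style: a miss drives the slow path.
    fn get(hit: bool) -> u32 {
        match lookup_sketch(hit) {
            Ok(value) => value,
            Err(lookup) => run_query(lookup),
        }
    }

    // JobOwner::try_start style, after waiting on a concurrent job: by then
    // the value must be cached, so a miss can only be a bug.
    fn get_after_wait() -> u32 {
        lookup_sketch(true).unwrap_or_else(|_| panic!("value must be in cache after waiting"))
    }

    fn main() {
        assert_eq!(get(true), 42);
        assert_eq!(get(false), 7);
        assert_eq!(get_after_wait(), 42);
    }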