Remove sharding for VecCache
This sharding is never used (per the FIXME comment in the code). If we re-add sharding at some point in the future, this is cheap to restore, but for now there is no need for the extra complexity.
parent 01f7f3a1ff
commit 668b3188ab
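For context, a sketch of what the removed machinery does, using std's `Mutex` and `HashMap` rather than rustc's actual `Sharded` type (the shard count and modulo-based selection here are illustrative assumptions): a sharded cache splits its entries across several independently locked maps so concurrent threads contend less.

```rust
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::sync::Mutex;

const SHARDS: usize = 4; // illustrative shard count, not rustc's

// Each shard owns an independent lock, so threads hitting different
// shards never contend with each other.
struct ShardedCache<K, V> {
    shards: [Mutex<HashMap<K, V>>; SHARDS],
    hasher: RandomState,
}

impl<K: Hash + Eq, V: Clone> ShardedCache<K, V> {
    fn new() -> Self {
        ShardedCache {
            shards: std::array::from_fn(|_| Mutex::new(HashMap::new())),
            hasher: RandomState::new(),
        }
    }

    // Only the shard the key hashes to gets locked.
    fn shard_for(&self, key: &K) -> &Mutex<HashMap<K, V>> {
        &self.shards[(self.hasher.hash_one(key) as usize) % SHARDS]
    }

    fn insert(&self, key: K, value: V) {
        self.shard_for(&key).lock().unwrap().insert(key, value);
    }

    fn get(&self, key: &K) -> Option<V> {
        self.shard_for(key).lock().unwrap().get(key).cloned()
    }
}

fn main() {
    let cache = ShardedCache::new();
    cache.insert("query", 42);
    assert_eq!(cache.get(&"query"), Some(42));
}
```

Removing the sharding collapses this whole structure to a single lock around one map, which is exactly the `Sharded` → `Lock` swap in the diff below.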
@@ -101,7 +101,7 @@ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
 }
 
 pub struct VecCache<K: Idx, V> {
-    cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
+    cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
 }
 
 impl<K: Idx, V> Default for VecCache<K, V> {
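For a feel of the resulting shape, a minimal self-contained sketch, assuming std's `Mutex` in place of rustc's `Lock`, a plain `Vec<Option<_>>` with `usize` keys in place of `IndexVec<K, _>`, and a `u32` alias standing in for `DepNodeIndex`; `complete` grows the vector on demand, which is assumed to mirror `IndexVec::insert`:

```rust
use std::sync::Mutex;

type DepNodeIndex = u32; // stand-in alias for rustc's DepNodeIndex

pub struct VecCache<V> {
    cache: Mutex<Vec<Option<(V, DepNodeIndex)>>>,
}

impl<V: Copy> VecCache<V> {
    pub fn new() -> Self {
        VecCache { cache: Mutex::new(Vec::new()) }
    }

    pub fn lookup(&self, key: usize) -> Option<(V, DepNodeIndex)> {
        let lock = self.cache.lock().unwrap();
        // `get` yields Option<&Option<_>>: out-of-bounds and vacant
        // slots both count as a cache miss.
        if let Some(Some(value)) = lock.get(key) { Some(*value) } else { None }
    }

    pub fn complete(&self, key: usize, value: V, index: DepNodeIndex) {
        let mut lock = self.cache.lock().unwrap();
        // Grow the dense storage so `key` is addressable, then fill it.
        if lock.len() <= key {
            lock.resize_with(key + 1, || None);
        }
        lock[key] = Some((value, index));
    }
}

fn main() {
    let cache = VecCache::new();
    assert_eq!(cache.lookup(3), None);
    cache.complete(3, "result", 7);
    assert_eq!(cache.lookup(3), Some(("result", 7)));
}
```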
@@ -120,24 +120,20 @@ impl<K, V> QueryCache for VecCache<K, V>
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-        // FIXME: lock_shard_by_hash will use high bits which are usually zero in the index() passed
-        // here. This makes sharding essentially useless, always selecting the zero'th shard.
-        let lock = self.cache.lock_shard_by_hash(key.index() as u64);
+        let lock = self.cache.lock();
         if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
     }
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        let mut lock = self.cache.lock_shard_by_hash(key.index() as u64);
+        let mut lock = self.cache.lock();
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        for shard in self.cache.lock_shards() {
-            for (k, v) in shard.iter_enumerated() {
-                if let Some(v) = v {
-                    f(&k, &v.0, v.1);
-                }
+        for (k, v) in self.cache.lock().iter_enumerated() {
+            if let Some(v) = v {
+                f(&k, &v.0, v.1);
             }
         }
     }
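The removed FIXME is the heart of the change: `Sharded` selects a shard from the high bits of the supplied hash, and `key.index() as u64` is a small dense integer whose high bits are always zero. A minimal demonstration (shard count and exact bit positions are assumptions; the point is only that high-bit selection degenerates for dense keys):

```rust
const SHARD_BITS: u32 = 5; // assumption: 32 shards
const SHARDS: usize = 1 << SHARD_BITS;

// Shard selection from the *high* bits of the hash, as the removed
// FIXME describes.
fn shard_index_by_hash(hash: u64) -> usize {
    ((hash >> (64 - SHARD_BITS)) as usize) % SHARDS
}

fn main() {
    // Query keys are dense indices, so `key.index() as u64` never sets
    // the high bits: every key selects shard 0 and the other shards'
    // locks are dead weight.
    for index in [0u64, 1, 1_000, 1_000_000, u32::MAX as u64] {
        assert_eq!(shard_index_by_hash(index), 0);
    }
    println!("every shard but shard 0 goes unused for dense keys");
}
```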
@@ -149,9 +145,6 @@ pub struct DefIdCache<V> {
     ///
     /// The second element of the tuple is the set of keys actually present in the IndexVec, used
     /// for faster iteration in `iter()`.
-    // FIXME: This may want to be sharded, like VecCache. However *how* to shard an IndexVec isn't
-    // super clear; VecCache is effectively not sharded today (see FIXME there). For now just omit
-    // that complexity here.
     local: Lock<(IndexVec<DefIndex, Option<(V, DepNodeIndex)>>, Vec<DefIndex>)>,
     foreign: DefaultCache<DefId, V>,
 }
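The doc comment that survives describes a pattern worth spelling out: alongside the dense `IndexVec`, keep a `Vec` of the keys actually inserted so `iter()` walks only occupied slots rather than the full capacity. A hedged sketch with simplified stand-ins (`usize` for `DefIndex`, no lock, and `SparseIterCache` is a hypothetical name):

```rust
struct SparseIterCache<V> {
    slots: Vec<Option<V>>, // dense storage, indexed by key
    present: Vec<usize>,   // keys that actually hold a value
}

impl<V> SparseIterCache<V> {
    fn new() -> Self {
        SparseIterCache { slots: Vec::new(), present: Vec::new() }
    }

    fn insert(&mut self, key: usize, value: V) {
        if self.slots.len() <= key {
            self.slots.resize_with(key + 1, || None);
        }
        // Record the key only on first insertion so `present` holds no
        // duplicates even if a slot is overwritten.
        if self.slots[key].is_none() {
            self.present.push(key);
        }
        self.slots[key] = Some(value);
    }

    // O(number of inserted keys), not O(largest key): the empty slots
    // between occupied ones are never touched.
    fn iter(&self, f: &mut dyn FnMut(usize, &V)) {
        for &key in &self.present {
            if let Some(v) = &self.slots[key] {
                f(key, v);
            }
        }
    }
}

fn main() {
    let mut cache = SparseIterCache::new();
    cache.insert(2, "b");
    cache.insert(90_000, "z"); // large gap: iteration still visits 2 keys
    let mut seen = Vec::new();
    cache.iter(&mut |k, v| seen.push((k, *v)));
    assert_eq!(seen, vec![(2, "b"), (90_000, "z")]);
}
```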