From 7f7d4c376af12f66e09a41d10612dbd0010a6abe Mon Sep 17 00:00:00 2001
From: Oliver Schneider
Date: Fri, 13 Apr 2018 18:48:41 +0200
Subject: [PATCH] Get rid of redundant `HashSet`

---
 src/librustc/mir/interpret/mod.rs     |  2 +-
 src/librustc/ty/maps/on_disk_cache.rs | 31 +++++++++++++--------------
 src/librustc_metadata/encoder.rs      | 25 +++++++++++----------
 3 files changed, 28 insertions(+), 30 deletions(-)

diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs
index 07768ac3a3b..c9eed0e4a28 100644
--- a/src/librustc/mir/interpret/mod.rs
+++ b/src/librustc/mir/interpret/mod.rs
@@ -178,7 +178,7 @@ pub fn specialized_encode_alloc_id<
         AllocKind::Fn.encode(encoder)?;
         fn_instance.encode(encoder)?;
     } else if let Some(did) = tcx.interpret_interner.get_static(alloc_id) {
-        // referring to statics doesn't need to know about their allocations, just hash the DefId
+        // referring to statics doesn't need to know about their allocations, just about its DefId
         AllocKind::Static.encode(encoder)?;
         did.encode(encoder)?;
     } else {
diff --git a/src/librustc/ty/maps/on_disk_cache.rs b/src/librustc/ty/maps/on_disk_cache.rs
index 62f2cd88935..d60206ffd32 100644
--- a/src/librustc/ty/maps/on_disk_cache.rs
+++ b/src/librustc/ty/maps/on_disk_cache.rs
@@ -201,7 +201,6 @@ impl<'sess> OnDiskCache<'sess> {
             predicate_shorthands: FxHashMap(),
             expn_info_shorthands: FxHashMap(),
             interpret_allocs: FxHashMap(),
-            interpret_alloc_ids: FxHashSet(),
             interpret_allocs_inverse: Vec::new(),
             codemap: CachingCodemapView::new(tcx.sess.codemap()),
             file_to_file_index,
@@ -284,7 +283,12 @@ impl<'sess> OnDiskCache<'sess> {
                 let mut interpret_alloc_index = Vec::new();
                 let mut n = 0;
                 loop {
-                    let new_n = encoder.interpret_alloc_ids.len();
+                    let new_n = encoder.interpret_allocs_inverse.len();
+                    // if we have found new ids, serialize those, too
+                    if n == new_n {
+                        // otherwise, abort
+                        break;
+                    }
                     for idx in n..new_n {
                         let id = encoder.interpret_allocs_inverse[idx];
                         let pos = AbsoluteBytePos::new(encoder.position());
@@ -295,11 +299,6 @@ impl<'sess> OnDiskCache<'sess> {
                             id,
                         )?;
                     }
-                    // if we have found new ids, serialize those, too
-                    if n == new_n {
-                        // otherwise, abort
-                        break;
-                    }
                     n = new_n;
                 }
                 interpret_alloc_index
@@ -802,7 +801,6 @@ struct CacheEncoder<'enc, 'a, 'tcx, E>
     expn_info_shorthands: FxHashMap<Mark, AbsoluteBytePos>,
     interpret_allocs: FxHashMap<interpret::AllocId, usize>,
     interpret_allocs_inverse: Vec<interpret::AllocId>,
-    interpret_alloc_ids: FxHashSet<interpret::AllocId>,
     codemap: CachingCodemapView<'tcx>,
     file_to_file_index: FxHashMap<*const FileMap, FileMapIndex>,
 }
@@ -839,14 +837,15 @@ impl<'enc, 'a, 'tcx, E> SpecializedEncoder<interpret::AllocId> for CacheEncoder<
     where E: 'enc + ty_codec::TyEncoder
 {
     fn specialized_encode(&mut self, alloc_id: &interpret::AllocId) -> Result<(), Self::Error> {
-        let index = if self.interpret_alloc_ids.insert(*alloc_id) {
-            let idx = self.interpret_alloc_ids.len() - 1;
-            assert_eq!(idx, self.interpret_allocs_inverse.len());
-            self.interpret_allocs_inverse.push(*alloc_id);
-            assert!(self.interpret_allocs.insert(*alloc_id, idx).is_none());
-            idx
-        } else {
-            self.interpret_allocs[alloc_id]
+        use std::collections::hash_map::Entry;
+        let index = match self.interpret_allocs.entry(*alloc_id) {
+            Entry::Occupied(e) => *e.get(),
+            Entry::Vacant(e) => {
+                let idx = self.interpret_allocs_inverse.len();
+                self.interpret_allocs_inverse.push(*alloc_id);
+                e.insert(idx);
+                idx
+            },
         };
 
         index.encode(self)
diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs
index 5ecfbd7c6fb..a61428b841f 100644
--- a/src/librustc_metadata/encoder.rs
+++ b/src/librustc_metadata/encoder.rs
@@ -29,7 +29,7 @@ use rustc::ty::{self, Ty, TyCtxt, ReprOptions, SymbolName};
 use rustc::ty::codec::{self as ty_codec, TyEncoder};
 
 use rustc::session::config::{self, CrateTypeProcMacro};
-use rustc::util::nodemap::{FxHashMap, FxHashSet};
+use rustc::util::nodemap::FxHashMap;
 
 use rustc_data_structures::stable_hasher::StableHasher;
 use rustc_serialize::{Encodable, Encoder, SpecializedEncoder, opaque};
@@ -62,7 +62,6 @@ pub struct EncodeContext<'a, 'tcx: 'a> {
 
     interpret_allocs: FxHashMap<interpret::AllocId, usize>,
     interpret_allocs_inverse: Vec<interpret::AllocId>,
-    interpret_alloc_ids: FxHashSet<interpret::AllocId>,
 
     // This is used to speed up Span encoding.
     filemap_cache: Lrc<FileMap>,
@@ -199,14 +198,15 @@ impl<'a, 'tcx> SpecializedEncoder<Ty<'tcx>> for EncodeContext<'a, 'tcx> {
 
 impl<'a, 'tcx> SpecializedEncoder<interpret::AllocId> for EncodeContext<'a, 'tcx> {
     fn specialized_encode(&mut self, alloc_id: &interpret::AllocId) -> Result<(), Self::Error> {
-        let index = if self.interpret_alloc_ids.insert(*alloc_id) {
-            let idx = self.interpret_alloc_ids.len() - 1;
-            assert_eq!(idx, self.interpret_allocs_inverse.len());
-            self.interpret_allocs_inverse.push(*alloc_id);
-            assert!(self.interpret_allocs.insert(*alloc_id, idx).is_none());
-            idx
-        } else {
-            self.interpret_allocs[alloc_id]
+        use std::collections::hash_map::Entry;
+        let index = match self.interpret_allocs.entry(*alloc_id) {
+            Entry::Occupied(e) => *e.get(),
+            Entry::Vacant(e) => {
+                let idx = self.interpret_allocs_inverse.len();
+                self.interpret_allocs_inverse.push(*alloc_id);
+                e.insert(idx);
+                idx
+            },
         };
 
         index.encode(self)
@@ -456,7 +456,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
             let mut n = 0;
             trace!("beginning to encode alloc ids");
             loop {
-                let new_n = self.interpret_alloc_ids.len();
+                let new_n = self.interpret_allocs_inverse.len();
                 // if we have found new ids, serialize those, too
                 if n == new_n {
                     // otherwise, abort
@@ -487,7 +487,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
 
         let is_proc_macro = tcx.sess.crate_types.borrow().contains(&CrateTypeProcMacro);
         let has_default_lib_allocator = attr::contains_name(tcx.hir.krate_attrs(), "default_lib_allocator");
-        let has_global_allocator = tcx.sess.has_global_allocator.get();
+        let has_global_allocator = *tcx.sess.has_global_allocator.get();
 
         let root = self.lazy(&CrateRoot {
             name: tcx.crate_name(LOCAL_CRATE),
@@ -1792,7 +1792,6 @@ pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         filemap_cache: tcx.sess.codemap().files()[0].clone(),
        interpret_allocs: Default::default(),
         interpret_allocs_inverse: Default::default(),
-        interpret_alloc_ids: Default::default(),
     };
 
     // Encode the rustc version string in a predictable location.
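
The pattern the patch switches both encoders to is a single map used as an interner: `interpret_allocs` maps an `AllocId` to its dense index, `interpret_allocs_inverse` maps the index back to the id, and the `Entry` API answers "seen before or not?" in one lookup, so the separate `FxHashSet` carries no extra information. A minimal, self-contained sketch of that interning pattern, using plain std `HashMap` instead of rustc's `FxHashMap` and hypothetical names (`Interner`, `u64` ids), could look like this:

use std::collections::hash_map::{Entry, HashMap};

/// Hypothetical stand-in for the encoder state: `forward` plays the role of
/// `interpret_allocs` (id -> dense index) and `inverse` plays the role of
/// `interpret_allocs_inverse` (dense index -> id).
struct Interner {
    forward: HashMap<u64, usize>,
    inverse: Vec<u64>,
}

impl Interner {
    fn new() -> Self {
        Interner { forward: HashMap::new(), inverse: Vec::new() }
    }

    /// Returns the dense index for `id`, allocating the next index on first sight.
    /// No separate "already seen" set is needed: the map's `Entry` answers that.
    fn intern(&mut self, id: u64) -> usize {
        match self.forward.entry(id) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let idx = self.inverse.len();
                self.inverse.push(id);
                e.insert(idx);
                idx
            }
        }
    }
}

fn main() {
    let mut interner = Interner::new();
    assert_eq!(interner.intern(42), 0);
    assert_eq!(interner.intern(7), 1);
    assert_eq!(interner.intern(42), 0); // repeated ids keep their first index
    assert_eq!(interner.inverse, vec![42, 7]);
}

Compared with the removed code, a new id costs one hash lookup instead of separate set and map insertions, and the length-consistency asserts become unnecessary because `inverse.len()` is the single source of the next index.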