Rollup merge of #45263 - Manishearth:hashmap-clean, r=bluss

Do some cleanups for hashmaps

@mystor noticed some things whilst reading through the hashmap RawTable code.

Firstly, RawTable carries a hash_offset value: the offset of the hash array from the start of the allocated buffer. This is always zero, and it isn't used consistently (meaning we would have bugs if we ever set it to something else). We should just remove it, since it doesn't buy us anything.
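
For context, RawTable packs the hash array and the (K, V) pairs into a single allocation, and hash_offset was meant to say where the hash array starts inside that buffer. Here is a minimal standalone sketch of that layout arithmetic (`round_up` and `layout` are hypothetical helpers for illustration, not the actual std internals):

```rust
// Single-allocation layout used by RawTable (simplified):
//   [ hashes ............ | padding | (K, V) pairs ............ ]
//     ^ offset 0 (the old `hash_offset`)   ^ rounded up to the pairs' alignment

/// Round `offset` up to a multiple of `align` (assumes `align` is a power of two).
fn round_up(offset: usize, align: usize) -> usize {
    (offset + align - 1) & !(align - 1)
}

/// Returns (hash_offset, pairs_offset, total_size) for the combined buffer.
fn layout(hash_size: usize, pairs_size: usize, pairs_align: usize) -> (usize, usize, usize) {
    let hash_offset = 0; // always zero: the hashes sit at the very start of the buffer
    let pairs_offset = round_up(hash_offset + hash_size, pairs_align);
    (hash_offset, pairs_offset, pairs_offset + pairs_size)
}

fn main() {
    // 128 bytes of hashes, then pairs aligned to 8: pairs start at 128 and end at 144.
    assert_eq!(layout(128, 16, 8), (0, 128, 144));
}
```

Since the first element of that tuple can never be anything but zero, carrying it around (and sometimes ignoring it) only obscures the code, hence the removal below.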

Secondly, the probing-length tag is not copied when cloning a raw table. This is minor; it just means that further inserts on a cloned hashmap do a bit more work than necessary.
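
As a toy illustration of this second fix (not the real RawTable; `long_probes` is a stand-in for the probing-length tag):

```rust
// Toy table with a flag that records "probe sequences have grown long".
// If Clone forgets to copy it, the clone rediscovers that fact the hard way
// on later inserts -- extra work, but no correctness problem.
struct ToyTable {
    entries: Vec<u64>,
    long_probes: bool, // stand-in for RawTable's probing-length tag
}

impl Clone for ToyTable {
    fn clone(&self) -> ToyTable {
        ToyTable {
            entries: self.entries.clone(),
            // The fix below amounts to adding the equivalent of this line.
            long_probes: self.long_probes,
        }
    }
}

fn main() {
    let original = ToyTable { entries: vec![1, 2, 3], long_probes: true };
    let copy = original.clone();
    assert!(copy.long_probes, "tag should survive cloning");
}
```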

r? @Gankro
kennytm 2017-10-15 14:21:55 +08:00 committed by GitHub
commit 5e8d407c31

@@ -717,26 +717,25 @@ fn calculate_offsets(hashes_size: usize,
     (pairs_offset, end_of_pairs, oflo)
 }
 
-// Returns a tuple of (minimum required malloc alignment, hash_offset,
+// Returns a tuple of (minimum required malloc alignment,
 // array_size), from the start of a mallocated array.
 fn calculate_allocation(hash_size: usize,
                         hash_align: usize,
                         pairs_size: usize,
                         pairs_align: usize)
-                        -> (usize, usize, usize, bool) {
-    let hash_offset = 0;
+                        -> (usize, usize, bool) {
     let (_, end_of_pairs, oflo) = calculate_offsets(hash_size, pairs_size, pairs_align);
     let align = cmp::max(hash_align, pairs_align);
 
-    (align, hash_offset, end_of_pairs, oflo)
+    (align, end_of_pairs, oflo)
 }
 
 #[test]
 fn test_offset_calculation() {
-    assert_eq!(calculate_allocation(128, 8, 16, 8), (8, 0, 144, false));
-    assert_eq!(calculate_allocation(3, 1, 2, 1), (1, 0, 5, false));
-    assert_eq!(calculate_allocation(6, 2, 12, 4), (4, 0, 20, false));
+    assert_eq!(calculate_allocation(128, 8, 16, 8), (8, 144, false));
+    assert_eq!(calculate_allocation(3, 1, 2, 1), (1, 5, false));
+    assert_eq!(calculate_allocation(6, 2, 12, 4), (4, 20, false));
     assert_eq!(calculate_offsets(128, 15, 4), (128, 143, false));
     assert_eq!(calculate_offsets(3, 2, 4), (4, 6, false));
     assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false));
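
As a worked example of where these expected values come from: in calculate_allocation(128, 8, 16, 8), the hashes occupy bytes 0..128, the pairs need 8-byte alignment and so span 128..144, and the required malloc alignment is max(8, 8) = 8, giving (8, 144, false). The leading 0 dropped from the old expectations was just the always-zero hash_offset.
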
@@ -768,10 +767,10 @@ impl<K, V> RawTable<K, V> {
         // This is great in theory, but in practice getting the alignment
         // right is a little subtle. Therefore, calculating offsets has been
         // factored out into a different function.
-        let (alignment, hash_offset, size, oflo) = calculate_allocation(hashes_size,
-                                                                        align_of::<HashUint>(),
-                                                                        pairs_size,
-                                                                        align_of::<(K, V)>());
+        let (alignment, size, oflo) = calculate_allocation(hashes_size,
+                                                           align_of::<HashUint>(),
+                                                           pairs_size,
+                                                           align_of::<(K, V)>());
         assert!(!oflo, "capacity overflow");
 
         // One check for overflow that covers calculation and rounding of size.
@@ -784,7 +783,7 @@ impl<K, V> RawTable<K, V> {
         let buffer = Heap.alloc(Layout::from_size_align(size, alignment).unwrap())
             .unwrap_or_else(|e| Heap.oom(e));
 
-        let hashes = buffer.offset(hash_offset as isize) as *mut HashUint;
+        let hashes = buffer as *mut HashUint;
 
         RawTable {
             capacity_mask: capacity.wrapping_sub(1),
@@ -1157,6 +1156,7 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
             }
 
             new_ht.size = self.size();
+            new_ht.set_tag(self.tag());
 
             new_ht
         }
@@ -1183,10 +1183,10 @@ unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for RawTable<K, V> {
         let hashes_size = self.capacity() * size_of::<HashUint>();
         let pairs_size = self.capacity() * size_of::<(K, V)>();
-        let (align, _, size, oflo) = calculate_allocation(hashes_size,
-                                                          align_of::<HashUint>(),
-                                                          pairs_size,
-                                                          align_of::<(K, V)>());
+        let (align, size, oflo) = calculate_allocation(hashes_size,
+                                                       align_of::<HashUint>(),
+                                                       pairs_size,
+                                                       align_of::<(K, V)>());
 
         debug_assert!(!oflo, "should be impossible");