Fixes to collections to accommodate arith-overflow changes.
* `collections::btree::node`: accommodate (transient) underflow.
* `collections::btree::map`: avoid underflow during `fn next`
for `BTreeMap::range` methods.
* `collections::slice`: note that pnkfelix deliberately used
  `new_pos_wrapping` only once; the other cases of arithmetic neither
  overflow nor underflow, which is a useful property to leave implicitly
  checked/documented via the remaining calls to `fn new_pos(..)`.
* `collections::vec_deque`: applied wrapping ops (somewhat blindly)
  to two implementation methods, and many tests.
* `std::collections::hash::table`: use the `OverflowingOps` trait to
  track overflow during the `calculate_offsets` and `calculate_allocation`
  functions. (A short sketch of the wrapping vs. overflowing idioms
  appears below, before the diff.)
parent cf18e9c331
commit e7c986105f
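For context on the two idioms used throughout the diff: the `VecDeque` changes switch index arithmetic to wrapping operations (the intermediate value is reduced modulo 2^N, then masked by the power-of-two capacity), while the hash-table changes switch size calculations to overflowing operations (the wrapped result plus a flag the caller can assert on). Below is a minimal sketch of both, written against today's inherent integer methods rather than the unstable `WrappingOps`/`OverflowingOps` traits this commit imports:

```rust
fn main() {
    // Wrapping arithmetic: the result wraps modulo 2^64 instead of
    // panicking. The VecDeque index helpers rely on this before masking
    // with `capacity - 1` (capacity is always a power of two).
    let cap: usize = 8;
    let head: usize = 0;
    let idx = head.wrapping_sub(1) & (cap - 1); // 0 - 1 wraps to usize::MAX, masked down to 7
    assert_eq!(idx, 7);

    // Overflowing arithmetic: returns the wrapped result together with a
    // flag saying whether overflow occurred, so the caller can assert on
    // it, as the hash table's offset/allocation calculations now do.
    let (end, oflo) = usize::MAX.overflowing_add(1);
    assert_eq!(end, 0);
    assert!(oflo);
}
```

In the diff, `wrap_add`/`wrap_sub` wrap before masking so that transiently out-of-range intermediates are harmless, while `calculate_offsets`/`calculate_allocation` propagate the overflow flag up to an `assert!`/`debug_assert!` at the allocation site.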
@@ -25,7 +25,7 @@ use core::fmt::Debug;
 use core::hash::{Hash, Hasher};
 use core::iter::{Map, FromIterator, IntoIterator};
 use core::ops::{Index, IndexMut};
-use core::{iter, fmt, mem};
+use core::{iter, fmt, mem, usize};
 use Bound::{self, Included, Excluded, Unbounded};
 
 use borrow::Borrow;
@@ -1467,7 +1467,7 @@ macro_rules! range_impl {
             $Range {
                 inner: AbsIter {
                     traversals: traversals,
-                    size: 0, // unused
+                    size: usize::MAX, // unused
                 }
             }
         }
@@ -1215,7 +1215,8 @@ impl<K, V> Node<K, V> {
         ptr::copy(
             self.edges_mut().as_mut_ptr().offset(index as isize),
             self.edges().as_ptr().offset(index as isize + 1),
-            self.len() - index + 1
+            // index can be == len+1, so do the +1 first to avoid underflow.
+            (self.len() + 1) - index
         );
 
         edge
@@ -96,6 +96,7 @@ use core::iter::{range_step, MultiplicativeIterator};
 use core::marker::Sized;
 use core::mem::size_of;
 use core::mem;
+use core::num::wrapping::WrappingOps;
 use core::ops::FnMut;
 use core::option::Option::{self, Some, None};
 use core::ptr::PtrExt;
@@ -1209,10 +1210,14 @@ struct SizeDirection {
 impl Iterator for ElementSwaps {
     type Item = (usize, usize);
 
-    #[inline]
+    // #[inline]
     fn next(&mut self) -> Option<(usize, usize)> {
+        fn new_pos_wrapping(i: usize, s: Direction) -> usize {
+            i.wrapping_add(match s { Pos => 1, Neg => -1 })
+        }
+
         fn new_pos(i: usize, s: Direction) -> usize {
-            i + match s { Pos => 1, Neg => -1 }
+            match s { Pos => i + 1, Neg => i - 1 }
         }
 
         // Find the index of the largest mobile element:
@@ -1220,7 +1225,7 @@ impl Iterator for ElementSwaps {
         // swap should be with a smaller `size` element.
         let max = self.sdir.iter().cloned().enumerate()
                            .filter(|&(i, sd)|
-                                new_pos(i, sd.dir) < self.sdir.len() &&
+                                new_pos_wrapping(i, sd.dir) < self.sdir.len() &&
                                 self.sdir[new_pos(i, sd.dir)].size < sd.size)
                            .max_by(|&(_, sd)| sd.size);
         match max {
@@ -26,6 +26,7 @@ use core::fmt;
 use core::iter::{self, repeat, FromIterator, IntoIterator, RandomAccessIterator};
 use core::mem;
 use core::num::{Int, UnsignedInt};
+use core::num::wrapping::WrappingOps;
 use core::ops::{Index, IndexMut};
 use core::ptr::{self, Unique};
 use core::raw::Slice as RawSlice;
@@ -120,6 +121,20 @@ impl<T> VecDeque<T> {
     #[inline]
     fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap) }
 
+    /// Returns the index in the underlying buffer for a given logical element
+    /// index + addend.
+    #[inline]
+    fn wrap_add(&self, idx: usize, addend: usize) -> usize {
+        wrap_index(idx.wrapping_add(addend), self.cap)
+    }
+
+    /// Returns the index in the underlying buffer for a given logical element
+    /// index - subtrahend.
+    #[inline]
+    fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
+        wrap_index(idx.wrapping_sub(subtrahend), self.cap)
+    }
+
     /// Copies a contiguous block of memory len long from src to dst
     #[inline]
     unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
@@ -197,7 +212,7 @@ impl<T> VecDeque<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get(&self, i: usize) -> Option<&T> {
         if i < self.len() {
-            let idx = self.wrap_index(self.tail + i);
+            let idx = self.wrap_add(self.tail, i);
             unsafe { Some(&*self.ptr.offset(idx as isize)) }
         } else {
             None
@@ -227,7 +242,7 @@ impl<T> VecDeque<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get_mut(&mut self, i: usize) -> Option<&mut T> {
         if i < self.len() {
-            let idx = self.wrap_index(self.tail + i);
+            let idx = self.wrap_add(self.tail, i);
             unsafe { Some(&mut *self.ptr.offset(idx as isize)) }
         } else {
             None
@@ -257,8 +272,8 @@ impl<T> VecDeque<T> {
     pub fn swap(&mut self, i: usize, j: usize) {
         assert!(i < self.len());
         assert!(j < self.len());
-        let ri = self.wrap_index(self.tail + i);
-        let rj = self.wrap_index(self.tail + j);
+        let ri = self.wrap_add(self.tail, i);
+        let rj = self.wrap_add(self.tail, j);
         unsafe {
             ptr::swap(self.ptr.offset(ri as isize), self.ptr.offset(rj as isize))
         }
@@ -427,7 +442,7 @@ impl<T> VecDeque<T> {
             // [. . . o o o o o o o . . . . . . ]
             // H T
             // [o o . o o o o o ]
-            let len = self.wrap_index(self.head - target_cap);
+            let len = self.wrap_sub(self.head, target_cap);
             unsafe {
                 self.copy_nonoverlapping(0, target_cap, len);
             }
@@ -438,7 +453,7 @@ impl<T> VecDeque<T> {
             // [o o o o o . . . . . . . . . o o ]
             // H T
             // [o o o o o . o o ]
-            debug_assert!(self.wrap_index(self.head - 1) < target_cap);
+            debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
             let len = self.cap - self.tail;
             let new_tail = target_cap - len;
             unsafe {
@@ -775,7 +790,7 @@ impl<T> VecDeque<T> {
             None
         } else {
             let tail = self.tail;
-            self.tail = self.wrap_index(self.tail + 1);
+            self.tail = self.wrap_add(self.tail, 1);
             unsafe { Some(self.buffer_read(tail)) }
         }
     }
@@ -799,7 +814,7 @@ impl<T> VecDeque<T> {
             debug_assert!(!self.is_full());
         }
 
-        self.tail = self.wrap_index(self.tail - 1);
+        self.tail = self.wrap_sub(self.tail, 1);
         let tail = self.tail;
         unsafe { self.buffer_write(tail, t); }
     }
@@ -824,7 +839,7 @@ impl<T> VecDeque<T> {
         }
 
         let head = self.head;
-        self.head = self.wrap_index(self.head + 1);
+        self.head = self.wrap_add(self.head, 1);
         unsafe { self.buffer_write(head, t) }
     }
 
@@ -847,7 +862,7 @@ impl<T> VecDeque<T> {
         if self.is_empty() {
             None
         } else {
-            self.head = self.wrap_index(self.head - 1);
+            self.head = self.wrap_sub(self.head, 1);
             let head = self.head;
             unsafe { Some(self.buffer_read(head)) }
         }
@@ -971,7 +986,7 @@ impl<T> VecDeque<T> {
         // A - The element that should be after the insertion point
        // M - Indicates element was moved
 
-        let idx = self.wrap_index(self.tail + i);
+        let idx = self.wrap_add(self.tail, i);
 
         let distance_to_tail = i;
         let distance_to_head = self.len() - i;
@@ -990,7 +1005,7 @@ impl<T> VecDeque<T> {
                 // [A o o o o o o o . . . . . I]
                 //
 
-                self.tail = self.wrap_index(self.tail - 1);
+                self.tail = self.wrap_sub(self.tail, 1);
             },
             (true, true, _) => unsafe {
                 // contiguous, insert closer to tail:
@@ -1012,7 +1027,7 @@ impl<T> VecDeque<T> {
                 // [o I A o o o o o . . . . . . . o]
                 // M M
 
-                let new_tail = self.wrap_index(self.tail - 1);
+                let new_tail = self.wrap_sub(self.tail, 1);
 
                 self.copy(new_tail, self.tail, 1);
                 // Already moved the tail, so we only copy `i - 1` elements.
@@ -1031,7 +1046,7 @@ impl<T> VecDeque<T> {
                 // M M M
 
                 self.copy(idx + 1, idx, self.head - idx);
-                self.head = self.wrap_index(self.head + 1);
+                self.head = self.wrap_add(self.head, 1);
             },
             (false, true, true) => unsafe {
                 // discontiguous, insert closer to tail, tail section:
@@ -1123,7 +1138,7 @@ impl<T> VecDeque<T> {
         }
 
         // tail might've been changed so we need to recalculate
-        let new_idx = self.wrap_index(self.tail + i);
+        let new_idx = self.wrap_add(self.tail, i);
         unsafe {
             self.buffer_write(new_idx, t);
         }
@@ -1170,7 +1185,7 @@ impl<T> VecDeque<T> {
         // R - Indicates element that is being removed
         // M - Indicates element was moved
 
-        let idx = self.wrap_index(self.tail + i);
+        let idx = self.wrap_add(self.tail, i);
 
         let elem = unsafe {
             Some(self.buffer_read(idx))
@@ -1219,7 +1234,7 @@ impl<T> VecDeque<T> {
                 // M M
 
                 self.copy(self.tail + 1, self.tail, i);
-                self.tail = self.wrap_index(self.tail + 1);
+                self.tail = self.wrap_add(self.tail, 1);
             },
             (false, false, false) => unsafe {
                 // discontiguous, remove closer to head, head section:
@@ -1265,7 +1280,7 @@ impl<T> VecDeque<T> {
                     self.copy(0, 1, self.head - 1);
                 }
 
-                self.head = self.wrap_index(self.head - 1);
+                self.head = self.wrap_sub(self.head, 1);
             },
             (false, true, false) => unsafe {
                 // discontiguous, remove closer to tail, head section:
@@ -1286,7 +1301,7 @@ impl<T> VecDeque<T> {
                 // move elements from tail to end forward, excluding the last one
                 self.copy(self.tail + 1, self.tail, self.cap - self.tail - 1);
 
-                self.tail = self.wrap_index(self.tail + 1);
+                self.tail = self.wrap_add(self.tail, 1);
             }
         }
 
@@ -1354,7 +1369,7 @@ impl<T> VecDeque<T> {
         }
 
         // Cleanup where the ends of the buffers are
-        self.head = self.wrap_index(self.head - other_len);
+        self.head = self.wrap_sub(self.head, other_len);
         other.head = other.wrap_index(other_len);
 
         other
@@ -1429,7 +1444,7 @@ fn wrap_index(index: usize, size: usize) -> usize {
 #[inline]
 fn count(tail: usize, head: usize, size: usize) -> usize {
     // size is always a power of 2
-    (head - tail) & (size - 1)
+    (head.wrapping_sub(tail)) & (size - 1)
 }
 
 /// `VecDeque` iterator.
@@ -1461,7 +1476,7 @@ impl<'a, T> Iterator for Iter<'a, T> {
             return None;
         }
         let tail = self.tail;
-        self.tail = wrap_index(self.tail + 1, self.ring.len());
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
         unsafe { Some(self.ring.get_unchecked(tail)) }
     }
 
@@ -1479,7 +1494,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
         if self.tail == self.head {
             return None;
         }
-        self.head = wrap_index(self.head - 1, self.ring.len());
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
         unsafe { Some(self.ring.get_unchecked(self.head)) }
     }
 }
@@ -1500,7 +1515,7 @@ impl<'a, T> RandomAccessIterator for Iter<'a, T> {
         if j >= self.indexable() {
             None
         } else {
-            let idx = wrap_index(self.tail + j, self.ring.len());
+            let idx = wrap_index(self.tail.wrapping_add(j), self.ring.len());
             unsafe { Some(self.ring.get_unchecked(idx)) }
         }
     }
@@ -1524,7 +1539,7 @@ impl<'a, T> Iterator for IterMut<'a, T> {
             return None;
         }
         let tail = self.tail;
-        self.tail = wrap_index(self.tail + 1, self.ring.len());
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
 
         unsafe {
             let elem = self.ring.get_unchecked_mut(tail);
@@ -1546,7 +1561,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
         if self.tail == self.head {
             return None;
         }
-        self.head = wrap_index(self.head - 1, self.ring.len());
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
 
         unsafe {
             let elem = self.ring.get_unchecked_mut(self.head);
@@ -20,6 +20,7 @@ use marker::{Copy, Send, Sync, Sized, self};
 use mem::{min_align_of, size_of};
 use mem;
 use num::{Int, UnsignedInt};
+use num::wrapping::{OverflowingOps, WrappingOps};
 use ops::{Deref, DerefMut, Drop};
 use option::Option;
 use option::Option::{Some, None};
@@ -371,7 +372,6 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
     /// In the cited blog posts above, this is called the "distance to
     /// initial bucket", or DIB. Also known as "probe count".
     pub fn distance(&self) -> usize {
-        use core::num::wrapping::WrappingOps;
         // Calculates the distance one has to travel when going from
         // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
         // if the destination is not reached before the end of the table.
@@ -528,13 +528,13 @@ fn test_rounding() {
 fn calculate_offsets(hashes_size: usize,
                      keys_size: usize, keys_align: usize,
                      vals_align: usize)
-                     -> (usize, usize) {
+                     -> (usize, usize, bool) {
     let keys_offset = round_up_to_next(hashes_size, keys_align);
-    let end_of_keys = keys_offset + keys_size;
+    let (end_of_keys, oflo) = keys_offset.overflowing_add(keys_size);
 
     let vals_offset = round_up_to_next(end_of_keys, vals_align);
 
-    (keys_offset, vals_offset)
+    (keys_offset, vals_offset, oflo)
 }
 
 // Returns a tuple of (minimum required malloc alignment, hash_offset,
@@ -542,26 +542,26 @@ fn calculate_offsets(hashes_size: usize,
 fn calculate_allocation(hash_size: usize, hash_align: usize,
                         keys_size: usize, keys_align: usize,
                         vals_size: usize, vals_align: usize)
-                        -> (usize, usize, usize) {
+                        -> (usize, usize, usize, bool) {
     let hash_offset = 0;
-    let (_, vals_offset) = calculate_offsets(hash_size,
-                                             keys_size, keys_align,
-                                                        vals_align);
-    let end_of_vals = vals_offset + vals_size;
+    let (_, vals_offset, oflo) = calculate_offsets(hash_size,
+                                                   keys_size, keys_align,
+                                                              vals_align);
+    let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);
 
     let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
 
-    (min_align, hash_offset, end_of_vals)
+    (min_align, hash_offset, end_of_vals, oflo || oflo2)
 }
 
 #[test]
 fn test_offset_calculation() {
-    assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
-    assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
-    assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
-    assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
-    assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
-    assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
+    assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148, false));
+    assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6, false));
+    assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48, false));
+    assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144, false));
+    assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5, false));
+    assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24, false));
 }
 
 impl<K, V> RawTable<K, V> {
@@ -591,12 +591,14 @@ impl<K, V> RawTable<K, V> {
         // This is great in theory, but in practice getting the alignment
         // right is a little subtle. Therefore, calculating offsets has been
         // factored out into a different function.
-        let (malloc_alignment, hash_offset, size) =
+        let (malloc_alignment, hash_offset, size, oflo) =
             calculate_allocation(
                 hashes_size, min_align_of::<u64>(),
                 keys_size, min_align_of::< K >(),
                 vals_size, min_align_of::< V >());
 
+        assert!(!oflo, "capacity overflow");
+
         // One check for overflow that covers calculation and rounding of size.
         let size_of_bucket = size_of::<u64>().checked_add(size_of::<K>()).unwrap()
                                              .checked_add(size_of::<V>()).unwrap();
@@ -622,10 +624,11 @@ impl<K, V> RawTable<K, V> {
         let keys_size = self.capacity * size_of::<K>();
 
         let buffer = *self.hashes as *mut u8;
-        let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
-                                                           keys_size, min_align_of::<K>(),
-                                                           min_align_of::<V>());
-
+        let (keys_offset, vals_offset, oflo) =
+            calculate_offsets(hashes_size,
+                              keys_size, min_align_of::<K>(),
+                              min_align_of::<V>());
+        debug_assert!(!oflo, "capacity overflow");
         unsafe {
             RawBucket {
                 hash: *self.hashes,
@@ -999,9 +1002,12 @@ impl<K, V> Drop for RawTable<K, V> {
         let hashes_size = self.capacity * size_of::<u64>();
         let keys_size = self.capacity * size_of::<K>();
         let vals_size = self.capacity * size_of::<V>();
-        let (align, _, size) = calculate_allocation(hashes_size, min_align_of::<u64>(),
-                                                    keys_size, min_align_of::<K>(),
-                                                    vals_size, min_align_of::<V>());
+        let (align, _, size, oflo) =
+            calculate_allocation(hashes_size, min_align_of::<u64>(),
+                                 keys_size, min_align_of::<K>(),
+                                 vals_size, min_align_of::<V>());
 
+        debug_assert!(!oflo, "should be impossible");
+
         unsafe {
             deallocate(*self.hashes as *mut u8, size, align);