Make ZST checks in core/alloc more readable
There are a bunch of these checks because of the special handling ZSTs need in various unsafe implementations. This change lets them be written as `T::IS_ZST` instead of `mem::size_of::<T>() == 0` every time, making them both terser and more readable. *Not* proposed for stabilization at this time. It would be `pub(crate)`, except `alloc` wants to use it too. (And while it doesn't matter now, if we ever get something like 85836, making this a `const` can help keep codegen simpler.)
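For context, the entire mechanism is tiny; here is a condensed sketch of the pattern this commit introduces (the trait, the `IS_ZST` constant, and the blanket impl are taken from the `core::mem` hunk below; the `example` function is hypothetical, just to show a call site):

    // Blanket trait exposing the size check as an associated const.
    pub trait SizedTypeProperties: Sized {
        const IS_ZST: bool = core::mem::size_of::<Self>() == 0;
    }
    impl<T> SizedTypeProperties for T {}

    // Call sites change from `mem::size_of::<T>() == 0` to:
    fn example<T>() {
        if T::IS_ZST {
            // take the zero-sized-type fast path
        }
    }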
parent c773c134c9
commit 44b4ce1d61
library/alloc/src/collections/vec_deque/mod.rs

@@ -12,7 +12,7 @@
 use core::hash::{Hash, Hasher};
 use core::iter::{repeat_with, FromIterator};
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::ops::{Index, IndexMut, Range, RangeBounds};
 use core::ptr::{self, NonNull};
 use core::slice;
@@ -177,7 +177,7 @@ fn ptr(&self) -> *mut T {
     /// Marginally more convenient
     #[inline]
     fn cap(&self) -> usize {
-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // For zero sized types, we are always at maximum capacity
             MAXIMUM_ZST_CAPACITY
         } else {
@@ -3038,7 +3038,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
     /// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
     fn from(mut other: Vec<T, A>) -> Self {
         let len = other.len();
-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // There's no actual allocation for ZSTs to worry about capacity,
             // but `VecDeque` can't handle as much length as `Vec`.
             assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
@@ -3124,7 +3124,7 @@ fn from(mut other: VecDeque<T, A>) -> Self {
     fn from(arr: [T; N]) -> Self {
         let mut deq = VecDeque::with_capacity(N);
         let arr = ManuallyDrop::new(arr);
-        if mem::size_of::<T>() != 0 {
+        if !<T>::IS_ZST {
             // SAFETY: VecDeque::with_capacity ensures that there is enough capacity.
             unsafe {
                 ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);
library/alloc/src/lib.rs

@@ -136,6 +136,7 @@
 #![feature(receiver_trait)]
 #![feature(saturating_int_impl)]
 #![feature(set_ptr_value)]
+#![feature(sized_type_properties)]
 #![feature(slice_from_ptr_range)]
 #![feature(slice_group_by)]
 #![feature(slice_ptr_get)]
library/alloc/src/raw_vec.rs

@@ -3,7 +3,7 @@
 use core::alloc::LayoutError;
 use core::cmp;
 use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::ops::Drop;
 use core::ptr::{self, NonNull, Unique};
 use core::slice;
@@ -168,7 +168,7 @@ pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
     #[cfg(not(no_global_oom_handling))]
     fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
         // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
-        if mem::size_of::<T>() == 0 || capacity == 0 {
+        if T::IS_ZST || capacity == 0 {
             Self::new_in(alloc)
         } else {
             // We avoid `unwrap_or_else` here because it bloats the amount of
@@ -229,7 +229,7 @@ pub fn ptr(&self) -> *mut T {
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+        if T::IS_ZST { usize::MAX } else { self.cap }
     }

     /// Returns a shared reference to the allocator backing this `RawVec`.
@@ -238,7 +238,7 @@ pub fn allocator(&self) -> &A {
     }

     fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
-        if mem::size_of::<T>() == 0 || self.cap == 0 {
+        if T::IS_ZST || self.cap == 0 {
             None
         } else {
             // We have an allocated chunk of memory, so we can bypass runtime
@@ -380,7 +380,7 @@ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryRes
         // This is ensured by the calling contexts.
         debug_assert!(additional > 0);

-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // Since we return a capacity of `usize::MAX` when `elem_size` is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow.into());
@@ -406,7 +406,7 @@ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryRes
     // `grow_amortized`, but this method is usually instantiated less often so
     // it's less critical.
     fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // Since we return a capacity of `usize::MAX` when the type size is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow.into());
library/alloc/src/slice.rs

@@ -16,9 +16,7 @@
 #[cfg(not(no_global_oom_handling))]
 use core::cmp::Ordering::{self, Less};
 #[cfg(not(no_global_oom_handling))]
-use core::mem;
-#[cfg(not(no_global_oom_handling))]
-use core::mem::size_of;
+use core::mem::{self, SizedTypeProperties};
 #[cfg(not(no_global_oom_handling))]
 use core::ptr;
@@ -1018,7 +1016,7 @@ fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
     const MIN_RUN: usize = 10;

     // Sorting has no meaningful behavior on zero-sized types.
-    if size_of::<T>() == 0 {
+    if T::IS_ZST {
         return;
     }
library/alloc/src/vec/drain.rs

@@ -1,7 +1,7 @@
 use crate::alloc::{Allocator, Global};
 use core::fmt;
 use core::iter::{FusedIterator, TrustedLen};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
 use core::ptr::{self, NonNull};
 use core::slice::{self};
@@ -202,7 +202,7 @@ fn drop(&mut self) {

         let mut vec = self.vec;

-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
             // this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
             unsafe {
library/alloc/src/vec/in_place_collect.rs

@@ -135,7 +135,7 @@
 //! vec.truncate(write_idx);
 //! ```
 use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
 use core::ptr::{self};

 use super::{InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
@@ -154,7 +154,7 @@ impl<T, I> SpecFromIter<T, I> for Vec<T>
     default fn from_iter(mut iterator: I) -> Self {
         // See "Layout constraints" section in the module documentation. We rely on const
         // optimization here since these conditions currently cannot be expressed as trait bounds
-        if mem::size_of::<T>() == 0
+        if T::IS_ZST
             || mem::size_of::<T>()
                 != mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
             || mem::align_of::<T>()
library/alloc/src/vec/into_iter.rs

@@ -8,7 +8,7 @@
     FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
 };
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
 #[cfg(not(no_global_oom_handling))]
 use core::ops::Deref;
 use core::ptr::{self, NonNull};
@@ -149,7 +149,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
     fn next(&mut self) -> Option<T> {
         if self.ptr == self.end {
             None
-        } else if mem::size_of::<T>() == 0 {
+        } else if T::IS_ZST {
             // purposefully don't use 'ptr.offset' because for
             // vectors with 0-size elements this would return the
             // same pointer.
@@ -167,7 +167,7 @@ fn next(&mut self) -> Option<T> {

     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let exact = if mem::size_of::<T>() == 0 {
+        let exact = if T::IS_ZST {
             self.end.addr().wrapping_sub(self.ptr.addr())
         } else {
             unsafe { self.end.sub_ptr(self.ptr) }
@@ -179,7 +179,7 @@ fn size_hint(&self) -> (usize, Option<usize>) {
     fn advance_by(&mut self, n: usize) -> Result<(), usize> {
         let step_size = self.len().min(n);
         let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
             // effectively results in unsigned pointers representing positions 0..usize::MAX,
             // which is valid for ZSTs.
@@ -209,7 +209,7 @@ fn count(self) -> usize {

         let len = self.len();

-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             if len < N {
                 self.forget_remaining_elements();
                 // Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
@@ -253,7 +253,7 @@ unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
         // that `T: Copy` so reading elements from the buffer doesn't invalidate
         // them for `Drop`.
         unsafe {
-            if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+            if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
         }
     }
 }
@@ -264,7 +264,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
     fn next_back(&mut self) -> Option<T> {
         if self.end == self.ptr {
             None
-        } else if mem::size_of::<T>() == 0 {
+        } else if T::IS_ZST {
             // See above for why 'ptr.offset' isn't used
             self.end = self.end.wrapping_byte_sub(1);
@@ -280,7 +280,7 @@ fn next_back(&mut self) -> Option<T> {
     #[inline]
     fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
         let step_size = self.len().min(n);
-        if mem::size_of::<T>() == 0 {
+        if T::IS_ZST {
             // SAFETY: same as for advance_by()
             self.end = self.end.wrapping_byte_sub(step_size);
         } else {
library/alloc/src/vec/mod.rs

@@ -64,7 +64,7 @@
 #[cfg(not(no_global_oom_handling))]
 use core::iter::FromIterator;
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::ops::{self, Index, IndexMut, Range, RangeBounds};
 use core::ptr::{self, NonNull};
 use core::slice::{self, SliceIndex};
@@ -2347,7 +2347,7 @@ pub fn extend_from_within<R>(&mut self, src: R)
     #[unstable(feature = "slice_flatten", issue = "95629")]
     pub fn into_flattened(self) -> Vec<T, A> {
         let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
-        let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
+        let (new_len, new_cap) = if T::IS_ZST {
             (len.checked_mul(N).expect("vec len overflow"), usize::MAX)
         } else {
             // SAFETY:
@@ -2677,7 +2677,7 @@ fn into_iter(self) -> IntoIter<T, A> {
             let mut me = ManuallyDrop::new(self);
             let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
             let begin = me.as_mut_ptr();
-            let end = if mem::size_of::<T>() == 0 {
+            let end = if T::IS_ZST {
                 begin.wrapping_byte_add(me.len())
             } else {
                 begin.add(me.len()) as *const T
library/core/src/mem/mod.rs

@@ -1178,3 +1178,18 @@ pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
 pub const fn variant_count<T>() -> usize {
     intrinsics::variant_count::<T>()
 }
+
+/// This is here only to simplify all the ZST checks we need in the library.
+/// It's not on a stabilization track right now.
+#[doc(hidden)]
+#[unstable(feature = "sized_type_properties", issue = "none")]
+pub trait SizedTypeProperties: Sized {
+    /// `true` if this type requires no storage.
+    /// `false` if its [size](size_of) is greater than zero.
+    #[doc(hidden)]
+    #[unstable(feature = "sized_type_properties", issue = "none")]
+    const IS_ZST: bool = size_of::<Self>() == 0;
+}
+
+#[doc(hidden)]
+#[unstable(feature = "sized_type_properties", issue = "none")]
+impl<T> SizedTypeProperties for T {}
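The hunk above is the whole feature: one trait, one defaulted constant, one blanket impl. A caller only needs `SizedTypeProperties` in scope (and, outside `core`, the `sized_type_properties` feature gate that the `alloc` hunk earlier enables); the blanket impl then makes `T::IS_ZST` available on every `Sized` type. A minimal usage sketch, with a made-up function name for illustration:

    use core::mem::SizedTypeProperties;

    fn capacity_hint<T>(cap: usize) -> usize {
        // Mirrors the `RawVec::capacity` change above: ZSTs never need
        // real storage, so report unlimited capacity.
        if T::IS_ZST { usize::MAX } else { cap }
    }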
library/core/src/slice/iter.rs

@@ -9,7 +9,7 @@
 use crate::intrinsics::{assume, exact_div, unchecked_sub};
 use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
 use crate::marker::{PhantomData, Send, Sized, Sync};
-use crate::mem;
+use crate::mem::{self, SizedTypeProperties};
 use crate::num::NonZeroUsize;
 use crate::ptr::NonNull;
@@ -91,7 +91,7 @@ pub(super) fn new(slice: &'a [T]) -> Self {
         unsafe {
             assume(!ptr.is_null());

-            let end = if mem::size_of::<T>() == 0 {
+            let end = if T::IS_ZST {
                 ptr.wrapping_byte_add(slice.len())
             } else {
                 ptr.add(slice.len())
@@ -227,7 +227,7 @@ pub(super) fn new(slice: &'a mut [T]) -> Self {
         unsafe {
             assume(!ptr.is_null());

-            let end = if mem::size_of::<T>() == 0 {
+            let end = if T::IS_ZST {
                 ptr.wrapping_byte_add(slice.len())
             } else {
                 ptr.add(slice.len())
library/core/src/slice/iter/macros.rs

@@ -100,7 +100,7 @@ unsafe fn post_inc_start(&mut self, offset: usize) -> * $raw_mut T {
             // Unsafe because the offset must not exceed `self.len()`.
             #[inline(always)]
             unsafe fn pre_dec_end(&mut self, offset: usize) -> * $raw_mut T {
-                if mem::size_of::<T>() == 0 {
+                if T::IS_ZST {
                     zst_shrink!(self, offset);
                     self.ptr.as_ptr()
                 } else {
@@ -140,7 +140,7 @@ fn next(&mut self) -> Option<$elem> {
                 // since we check if the iterator is empty first.
                 unsafe {
                     assume(!self.ptr.as_ptr().is_null());
-                    if mem::size_of::<T>() != 0 {
+                    if !<T>::IS_ZST {
                         assume(!self.end.is_null());
                     }
                     if is_empty!(self) {
@@ -166,7 +166,7 @@ fn count(self) -> usize {
             fn nth(&mut self, n: usize) -> Option<$elem> {
                 if n >= len!(self) {
                     // This iterator is now empty.
-                    if mem::size_of::<T>() == 0 {
+                    if T::IS_ZST {
                         // We have to do it this way as `ptr` may never be 0, but `end`
                         // could be (due to wrapping).
                         self.end = self.ptr.as_ptr();
@@ -355,7 +355,7 @@ fn next_back(&mut self) -> Option<$elem> {
                 // empty first.
                 unsafe {
                     assume(!self.ptr.as_ptr().is_null());
-                    if mem::size_of::<T>() != 0 {
+                    if !<T>::IS_ZST {
                         assume(!self.end.is_null());
                     }
                     if is_empty!(self) {
library/core/src/slice/mod.rs

@@ -9,7 +9,7 @@
 use crate::cmp::Ordering::{self, Greater, Less};
 use crate::intrinsics::{assert_unsafe_precondition, exact_div};
 use crate::marker::Copy;
-use crate::mem;
+use crate::mem::{self, SizedTypeProperties};
 use crate::num::NonZeroUsize;
 use crate::ops::{Bound, FnMut, OneSidedRange, Range, RangeBounds};
 use crate::option::Option;
@@ -3459,7 +3459,7 @@ fn gcd(a: usize, b: usize) -> usize {
     #[must_use]
     pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
         // Note that most of this function will be constant-evaluated,
-        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+        if U::IS_ZST || T::IS_ZST {
             // handle ZSTs specially, which is – don't handle them at all.
             return (self, &[], &[]);
         }
@@ -3520,7 +3520,7 @@ pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
     #[must_use]
     pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
         // Note that most of this function will be constant-evaluated,
-        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+        if U::IS_ZST || T::IS_ZST {
             // handle ZSTs specially, which is – don't handle them at all.
             return (self, &mut [], &mut []);
         }
@@ -4066,7 +4066,7 @@ pub fn take_last_mut<'a>(self: &mut &'a mut Self) -> Option<&'a mut T> {
     /// ```
     #[unstable(feature = "slice_flatten", issue = "95629")]
     pub fn flatten(&self) -> &[T] {
-        let len = if crate::mem::size_of::<T>() == 0 {
+        let len = if T::IS_ZST {
             self.len().checked_mul(N).expect("slice len overflow")
         } else {
             // SAFETY: `self.len() * N` cannot overflow because `self` is
@@ -4104,7 +4104,7 @@ pub fn flatten(&self) -> &[T] {
     /// ```
     #[unstable(feature = "slice_flatten", issue = "95629")]
     pub fn flatten_mut(&mut self) -> &mut [T] {
-        let len = if crate::mem::size_of::<T>() == 0 {
+        let len = if T::IS_ZST {
             self.len().checked_mul(N).expect("slice len overflow")
         } else {
             // SAFETY: `self.len() * N` cannot overflow because `self` is
library/core/src/slice/rotate.rs

@@ -1,5 +1,5 @@
 use crate::cmp;
-use crate::mem::{self, MaybeUninit};
+use crate::mem::{self, MaybeUninit, SizedTypeProperties};
 use crate::ptr;

 /// Rotates the range `[mid-left, mid+right)` such that the element at `mid` becomes the first
@@ -63,7 +63,7 @@
 /// when `left < right` the swapping happens from the left instead.
 pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
     type BufType = [usize; 32];
-    if mem::size_of::<T>() == 0 {
+    if T::IS_ZST {
         return;
     }
     loop {
library/core/src/slice/sort.rs

@@ -7,7 +7,7 @@
 //! stable sorting implementation.

 use crate::cmp;
-use crate::mem::{self, MaybeUninit};
+use crate::mem::{self, MaybeUninit, SizedTypeProperties};
 use crate::ptr;

 /// When dropped, copies from `src` into `dest`.
@@ -813,7 +813,7 @@ pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
     F: FnMut(&T, &T) -> bool,
 {
     // Sorting has no meaningful behavior on zero-sized types.
-    if mem::size_of::<T>() == 0 {
+    if T::IS_ZST {
         return;
     }
@@ -898,7 +898,7 @@ pub fn partition_at_index<T, F>(
         panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
     }

-    if mem::size_of::<T>() == 0 {
+    if T::IS_ZST {
         // Sorting has no meaningful behavior on zero-sized types. Do nothing.
     } else if index == v.len() - 1 {
         // Find max element and place it in the last position of the array. We're free to use