Fix wrapping pointer arithmetic

Caleb Zulawski 2022-06-24 00:13:36 -04:00
parent 6b3c599ba2
commit f10e591de1
4 changed files with 75 additions and 10 deletions

View File

@@ -1,11 +1,14 @@
use super::sealed::Sealed;
use crate::simd::{LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
/// Operations on SIMD vectors of constant pointers.
pub trait SimdConstPtr: Copy + Sealed {
/// Vector of usize with the same number of lanes.
/// Vector of `usize` with the same number of lanes.
type Usize;
/// Vector of `isize` with the same number of lanes.
type Isize;
/// Vector of mutable pointers to the same type.
type MutPtr;
@@ -23,10 +26,20 @@ pub trait SimdConstPtr: Copy + Sealed {
/// Equivalent to calling [`pointer::addr`] on each lane.
fn addr(self) -> Self::Usize;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
fn wrapping_offset(self, offset: Self::Isize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_add`] on each lane.
fn wrapping_add(self, count: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
fn wrapping_sub(self, count: Self::Usize) -> Self;
}
impl<T, const LANES: usize> Sealed for Simd<*const T, LANES> where
@@ -39,6 +52,7 @@ impl<T, const LANES: usize> SimdConstPtr for Simd<*const T, LANES>
LaneCount<LANES>: SupportedLaneCount,
{
type Usize = Simd<usize, LANES>;
type Isize = Simd<isize, LANES>;
type MutPtr = Simd<*mut T, LANES>;
type Mask = Mask<isize, LANES>;
@@ -57,10 +71,19 @@ fn addr(self) -> Self::Usize {
self.cast()
}
#[inline]
fn wrapping_offset(self, count: Self::Isize) -> Self {
// Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
unsafe { intrinsics::simd_arith_offset(self, count) }
}
#[inline]
fn wrapping_add(self, count: Self::Usize) -> Self {
let addr = self.addr() + (count * Simd::splat(core::mem::size_of::<T>()));
// Safety: transmuting usize to pointers is safe, even if accessing those pointers isn't.
unsafe { core::mem::transmute_copy(&addr) }
self.wrapping_offset(count.cast())
}
#[inline]
fn wrapping_sub(self, count: Self::Usize) -> Self {
self.wrapping_offset(-count.cast::<isize>())
}
}
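Usage illustration (not part of this patch): the new methods mirror the scalar pointer methods lane by lane. A minimal sketch, assuming a nightly toolchain with the portable_simd feature; the re-export path of `SimdConstPtr` has moved between nightlies, so the import may need adjusting.

#![feature(portable_simd)]
use core::simd::{Simd, SimdConstPtr}; // on newer nightlies: core::simd::ptr::SimdConstPtr

fn main() {
    let base: *const u8 = core::ptr::null();
    let ptrs = Simd::<*const u8, 4>::splat(base);
    let counts = Simd::from_array([0usize, 1, 2, 3]);
    // Lanewise wrapping_add: each lane is offset independently, with wrapping arithmetic.
    let moved = ptrs.wrapping_add(counts);
    for (p, c) in moved.to_array().into_iter().zip(counts.to_array()) {
        assert_eq!(p, base.wrapping_add(c));
    }
}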

View File

@@ -1,11 +1,14 @@
use super::sealed::Sealed;
use crate::simd::{LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
/// Operations on SIMD vectors of mutable pointers.
pub trait SimdMutPtr: Copy + Sealed {
/// Vector of usize with the same number of lanes.
/// Vector of `usize` with the same number of lanes.
type Usize;
/// Vector of `isize` with the same number of lanes.
type Isize;
/// Vector of constant pointers to the same type.
type ConstPtr;
@@ -23,10 +26,20 @@ pub trait SimdMutPtr: Copy + Sealed {
/// Equivalent to calling [`pointer::addr`] on each lane.
fn addr(self) -> Self::Usize;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
fn wrapping_offset(self, offset: Self::Isize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_add`] on each lane.
fn wrapping_add(self, count: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
fn wrapping_sub(self, count: Self::Usize) -> Self;
}
impl<T, const LANES: usize> Sealed for Simd<*mut T, LANES> where LaneCount<LANES>: SupportedLaneCount
@@ -37,6 +50,7 @@ impl<T, const LANES: usize> SimdMutPtr for Simd<*mut T, LANES>
LaneCount<LANES>: SupportedLaneCount,
{
type Usize = Simd<usize, LANES>;
type Isize = Simd<isize, LANES>;
type ConstPtr = Simd<*const T, LANES>;
type Mask = Mask<isize, LANES>;
@@ -55,10 +69,19 @@ fn addr(self) -> Self::Usize {
self.cast()
}
#[inline]
fn wrapping_offset(self, count: Self::Isize) -> Self {
// Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
unsafe { intrinsics::simd_arith_offset(self, count) }
}
#[inline]
fn wrapping_add(self, count: Self::Usize) -> Self {
let addr = self.addr() + (count * Simd::splat(core::mem::size_of::<T>()));
// Safety: transmuting usize to pointers is safe, even if accessing those pointers isn't.
unsafe { core::mem::transmute_copy(&addr) }
self.wrapping_offset(count.cast())
}
#[inline]
fn wrapping_sub(self, count: Self::Usize) -> Self {
self.wrapping_offset(-count.cast::<isize>())
}
}
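Another illustration (not part of this patch): the mutable-pointer trait behaves the same way, and `wrapping_offset` takes signed counts, so lanes can also step backwards. Same assumptions as above (nightly portable_simd; the `SimdMutPtr` import path varies by nightly).

#![feature(portable_simd)]
use core::simd::{Simd, SimdMutPtr}; // on newer nightlies: core::simd::ptr::SimdMutPtr

fn main() {
    let base: *mut u32 = core::ptr::null_mut();
    // Move every lane forward by 8 elements, then step each lane by a different signed count.
    let ptrs = Simd::<*mut u32, 4>::splat(base).wrapping_add(Simd::splat(8));
    let stepped = ptrs.wrapping_offset(Simd::from_array([-8isize, -4, 0, 4]));
    assert_eq!(stepped.to_array()[0], base);                  // lane 0 is back at the original pointer
    assert_eq!(stepped.to_array()[3], base.wrapping_add(12)); // lane 3 ended up 12 elements ahead
}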

View File

@@ -151,4 +151,7 @@
pub(crate) fn simd_select<M, T>(m: M, yes: T, no: T) -> T;
#[allow(unused)]
pub(crate) fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;
// Lanewise equivalent of `pointer::wrapping_offset`
pub(crate) fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;
}
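For reference, the scalar identity that the `wrapping_add`/`wrapping_sub` impls above rely on: adding an unsigned count is defined as offsetting by that count reinterpreted as a signed element offset, and subtracting is offsetting by its negation. A standalone sketch with arbitrary example values:

fn main() {
    let p: *const u16 = core::ptr::null();
    let count: usize = 5;
    // Matches the scalar pointer methods: wrapping_add/sub are defined in terms of wrapping_offset.
    assert_eq!(p.wrapping_add(count), p.wrapping_offset(count as isize));
    assert_eq!(p.wrapping_sub(count), p.wrapping_offset((count as isize).wrapping_neg()));
}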

View File

@@ -21,6 +21,14 @@ fn addr<const LANES: usize>() {
);
}
fn wrapping_offset<const LANES: usize>() {
test_helpers::test_binary_elementwise(
&Simd::<*$constness (), LANES>::wrapping_offset,
&<*$constness ()>::wrapping_offset,
&|_, _| true,
);
}
fn wrapping_add<const LANES: usize>() {
test_helpers::test_binary_elementwise(
&Simd::<*$constness (), LANES>::wrapping_add,
@@ -28,6 +36,14 @@ fn wrapping_add<const LANES: usize>() {
&|_, _| true,
);
}
fn wrapping_sub<const LANES: usize>() {
test_helpers::test_binary_elementwise(
&Simd::<*$constness (), LANES>::wrapping_sub,
&<*$constness ()>::wrapping_sub,
&|_, _| true,
);
}
}
}
}