Rename SimdArray to Vector, remove its generic parameter, and remove LanesAtMost32

Caleb Zulawski 2021-06-25 03:34:10 +00:00
parent 732b7edfab
commit c077bf3c07
23 changed files with 450 additions and 505 deletions
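The change collapses two traits into one: the per-width marker trait LanesAtMost32 and the LANES-parameterized SimdArray both disappear in favor of a single, unparameterized Vector trait. A minimal sketch of what the bound change looks like for downstream code (illustrative only, not part of the diff; the old bounds are copied from the trait declaration below):

// Before: a marker trait plus a LANES-parameterized array trait,
// which also dragged SimdUsize/SimdIsize/MaskSize bounds along.
fn with_old_bounds<const LANES: usize>(v: SimdI32<LANES>) -> SimdI32<LANES>
where
    SimdI32<LANES>: LanesAtMost32 + SimdArray<LANES>,
    SimdUsize<LANES>: LanesAtMost32,
    SimdIsize<LANES>: LanesAtMost32,
    MaskSize<LANES>: Mask,
{
    v
}

// After: one trait carries Scalar, LANES, and the hidden bitmask width.
fn with_new_bounds<const LANES: usize>(v: SimdI32<LANES>) -> SimdI32<LANES>
where
    SimdI32<LANES>: Vector,
{
    v
}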

View File

@ -1,253 +0,0 @@
use crate::intrinsics;
use crate::masks::*;
use crate::vector::ptr::{SimdConstPtr, SimdMutPtr};
use crate::vector::*;
/// A representation of a vector as an "array" with indices, implementing
/// operations applicable to any vector type based solely on "having lanes",
/// and describing relationships between vector and scalar types.
pub trait SimdArray<const LANES: usize>: crate::LanesAtMost32
where
SimdUsize<LANES>: crate::LanesAtMost32,
SimdIsize<LANES>: crate::LanesAtMost32,
MaskSize<LANES>: crate::Mask,
Self: Sized,
{
/// The scalar type in every lane of this vector type.
type Scalar: Copy + Sized;
/// The number of lanes for this vector.
const LANES: usize = LANES;
/// Generates a SIMD vector with the same value in every lane.
#[must_use]
fn splat(val: Self::Scalar) -> Self;
/// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
/// If an index is out of bounds, that lane instead selects the value from the "or" vector.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 5]);
/// let alt = SimdI32::from_array([-5, -4, -3, -2]);
///
/// let result = SimdI32::<4>::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds.
/// assert_eq!(result, SimdI32::from_array([-5, 13, 10, 15]));
/// ```
#[must_use]
#[inline]
fn gather_or(slice: &[Self::Scalar], idxs: SimdUsize<LANES>, or: Self) -> Self {
Self::gather_select(slice, MaskSize::splat(true), idxs, or)
}
/// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
/// Out-of-bounds indices instead use the default value for that lane (0).
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 5]);
///
/// let result = SimdI32::<4>::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds.
/// assert_eq!(result, SimdI32::from_array([0, 13, 10, 15]));
/// ```
#[must_use]
#[inline]
fn gather_or_default(slice: &[Self::Scalar], idxs: SimdUsize<LANES>) -> Self
where
Self::Scalar: Default,
{
Self::gather_or(slice, idxs, Self::splat(Self::Scalar::default()))
}
/// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
/// Out-of-bounds or masked indices instead select the value from the "or" vector.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 5]);
/// let alt = SimdI32::from_array([-5, -4, -3, -2]);
/// let mask = MaskSize::from_array([true, true, true, false]); // Note the mask of the last lane.
///
/// let result = SimdI32::<4>::gather_select(&vec, mask, idxs, alt); // Note the lane that is out-of-bounds.
/// assert_eq!(result, SimdI32::from_array([-5, 13, 10, -2]));
/// ```
#[must_use]
#[inline]
fn gather_select(
slice: &[Self::Scalar],
mask: MaskSize<LANES>,
idxs: SimdUsize<LANES>,
or: Self,
) -> Self {
let mask = (mask & idxs.lanes_lt(SimdUsize::splat(slice.len()))).to_int();
let base_ptr = SimdConstPtr::splat(slice.as_ptr());
// Ferris forgive me, I have done pointer arithmetic here.
let ptrs = base_ptr.wrapping_add(idxs);
// SAFETY: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah
unsafe { intrinsics::simd_gather(or, ptrs, mask) }
}
/// SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices.
/// Out-of-bounds indices are not written.
/// `scatter` writes "in order", so if an index receives two writes, only the last is guaranteed.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 0]);
/// let vals = SimdI32::from_array([-27, 82, -41, 124]);
///
/// vals.scatter(&mut vec, idxs); // index 0 receives two writes.
/// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
#[inline]
fn scatter(self, slice: &mut [Self::Scalar], idxs: SimdUsize<LANES>) {
self.scatter_select(slice, MaskSize::splat(true), idxs)
}
/// SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices.
/// Out-of-bounds or masked indices are not written.
/// `scatter_select` writes "in order", so if an index receives two writes, only the last is guaranteed.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 0]);
/// let vals = SimdI32::from_array([-27, 82, -41, 124]);
/// let mask = MaskSize::from_array([true, true, true, false]); // Note the mask of the last lane.
///
/// vals.scatter_select(&mut vec, mask, idxs); // index 0's second write is masked, thus omitted.
/// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
#[inline]
fn scatter_select(
self,
slice: &mut [Self::Scalar],
mask: MaskSize<LANES>,
idxs: SimdUsize<LANES>,
) {
// We must construct our scatter mask before we derive a pointer!
let mask = (mask & idxs.lanes_lt(SimdUsize::splat(slice.len()))).to_int();
// SAFETY: This block works with *mut T derived from &mut 'a [T],
// which means it is delicate in Rust's borrowing model, circa 2021:
// &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts!
// Even though this block is largely safe methods, it must be almost exactly this way
// to prevent invalidating the raw ptrs while they're live.
// Thus, entering this block requires every value it uses to already be ready:
// 0. idxs we want to write to, which are used to construct the mask.
// 1. mask, which depends on an initial &'a [T] and the idxs.
// 2. actual values to scatter (self).
// 3. &mut [T] which will become our base ptr.
unsafe {
// Now Entering ☢️ *mut T Zone
let base_ptr = SimdMutPtr::splat(slice.as_mut_ptr());
// Ferris forgive me, I have done pointer arithmetic here.
let ptrs = base_ptr.wrapping_add(idxs);
// The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
intrinsics::simd_scatter(self, ptrs, mask)
// Cleared ☢️ *mut T Zone
}
}
}
macro_rules! impl_simdarray_for {
($simd:ident {type Scalar = $scalar:ident;}) => {
impl<const LANES: usize> SimdArray<LANES> for $simd<LANES>
where SimdUsize<LANES>: crate::LanesAtMost32,
SimdIsize<LANES>: crate::LanesAtMost32,
MaskSize<LANES>: crate::Mask,
Self: crate::LanesAtMost32,
{
type Scalar = $scalar;
#[must_use]
#[inline]
fn splat(val: Self::Scalar) -> Self {
[val; LANES].into()
}
}
};
($simd:ident $impl:tt) => {
impl<const LANES: usize> SimdArray<LANES> for $simd<LANES>
where SimdUsize<LANES>: crate::LanesAtMost32,
SimdIsize<LANES>: crate::LanesAtMost32,
MaskSize<LANES>: crate::Mask,
Self: crate::LanesAtMost32,
$impl
}
}
impl_simdarray_for! {
SimdUsize {
type Scalar = usize;
}
}
impl_simdarray_for! {
SimdIsize {
type Scalar = isize;
}
}
impl_simdarray_for! {
SimdI8 {
type Scalar = i8;
}
}
impl_simdarray_for! {
SimdI16 {
type Scalar = i16;
}
}
impl_simdarray_for! {
SimdI32 {
type Scalar = i32;
}
}
impl_simdarray_for! {
SimdI64 {
type Scalar = i64;
}
}
impl_simdarray_for! {
SimdU8 {
type Scalar = u8;
}
}
impl_simdarray_for! {
SimdU16 {
type Scalar = u16;
}
}
impl_simdarray_for! {
SimdU32 {
type Scalar = u32;
}
}
impl_simdarray_for! {
SimdU64 {
type Scalar = u64;
}
}
impl_simdarray_for! {
SimdF32 {
type Scalar = f32;
}
}
impl_simdarray_for! {
SimdF64 {
type Scalar = f64;
}
}

View File

@ -1,12 +1,12 @@
use crate::LanesAtMost32;
use crate::Vector;
macro_rules! implement_mask_ops {
{ $($vector:ident => $mask:ident ($inner_ty:ident),)* } => {
$(
impl<const LANES: usize> crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$inner_ty<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
crate::$inner_ty<LANES>: Vector,
crate::$mask<LANES>: crate::Mask,
{
/// Test if each lane is equal to the corresponding lane in `other`.
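The hunk ends before the generated methods, but judging from lanes_lt used later in this commit, this macro presumably emits the lanewise comparisons (lanes_eq and friends). A hedged usage sketch:

let a = SimdU32::<4>::from_array([1, 2, 3, 4]);
let b = SimdU32::<4>::from_array([1, 5, 3, 0]);
// Each lane compares independently, producing a mask.
let m = a.lanes_eq(b); // Mask32<4>: [true, false, true, false]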

View File

@ -1,7 +1,7 @@
/// Implements common traits on the specified vector `$name`, holding multiple `$lanes` of `$type`.
macro_rules! impl_vector {
{ $name:ident, $type:ty } => {
impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> $name<LANES> where Self: crate::Vector {
/// Construct a SIMD vector by setting all lanes to the given value.
pub const fn splat(value: $type) -> Self {
Self([value; LANES])
@ -44,23 +44,159 @@ macro_rules! impl_vector {
}
}
impl<const LANES: usize> Copy for $name<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> $name<LANES>
where
Self: crate::Vector,
crate::MaskSize<LANES>: crate::Mask,
crate::SimdIsize<LANES>: crate::Vector,
crate::SimdUsize<LANES>: crate::Vector,
{
/// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
/// If an index is out of bounds, that lane instead selects the value from the "or" vector.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 5]);
/// let alt = SimdI32::from_array([-5, -4, -3, -2]);
///
/// let result = SimdI32::<4>::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds.
/// assert_eq!(result, SimdI32::from_array([-5, 13, 10, 15]));
/// ```
#[must_use]
#[inline]
pub fn gather_or(slice: &[$type], idxs: crate::SimdUsize<LANES>, or: Self) -> Self {
Self::gather_select(slice, crate::MaskSize::splat(true), idxs, or)
}
impl<const LANES: usize> Clone for $name<LANES> where Self: crate::LanesAtMost32 {
/// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
/// Out-of-bounds indices instead use the default value for that lane (0).
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 5]);
///
/// let result = SimdI32::<4>::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds.
/// assert_eq!(result, SimdI32::from_array([0, 13, 10, 15]));
/// ```
#[must_use]
#[inline]
pub fn gather_or_default(slice: &[$type], idxs: crate::SimdUsize<LANES>) -> Self {
Self::gather_or(slice, idxs, Self::splat(<$type>::default()))
}
/// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
/// Out-of-bounds or masked indices instead select the value from the "or" vector.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 5]);
/// let alt = SimdI32::from_array([-5, -4, -3, -2]);
/// let mask = MaskSize::from_array([true, true, true, false]); // Note the mask of the last lane.
///
/// let result = SimdI32::<4>::gather_select(&vec, mask, idxs, alt); // Note the lane that is out-of-bounds.
/// assert_eq!(result, SimdI32::from_array([-5, 13, 10, -2]));
/// ```
#[must_use]
#[inline]
pub fn gather_select(
slice: &[$type],
mask: crate::MaskSize<LANES>,
idxs: crate::SimdUsize<LANES>,
or: Self,
) -> Self
{
let mask = (mask & idxs.lanes_lt(crate::SimdUsize::splat(slice.len()))).to_int();
let base_ptr = crate::vector::ptr::SimdConstPtr::splat(slice.as_ptr());
// Ferris forgive me, I have done pointer arithmetic here.
let ptrs = base_ptr.wrapping_add(idxs);
// SAFETY: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah
unsafe { crate::intrinsics::simd_gather(or, ptrs, mask) }
}
/// SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices.
/// Out-of-bounds indices are not written.
/// `scatter` writes "in order", so if an index receives two writes, only the last is guaranteed.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 0]);
/// let vals = SimdI32::from_array([-27, 82, -41, 124]);
///
/// vals.scatter(&mut vec, idxs); // index 0 receives two writes.
/// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
#[inline]
pub fn scatter(self, slice: &mut [$type], idxs: crate::SimdUsize<LANES>) {
self.scatter_select(slice, crate::MaskSize::splat(true), idxs)
}
/// SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices.
/// Out-of-bounds or masked indices are not written.
/// `scatter_select` writes "in order", so if an index receives two writes, only the last is guaranteed.
/// ```
/// # #![feature(portable_simd)]
/// # use core_simd::*;
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = SimdUsize::<4>::from_array([9, 3, 0, 0]);
/// let vals = SimdI32::from_array([-27, 82, -41, 124]);
/// let mask = MaskSize::from_array([true, true, true, false]); // Note the mask of the last lane.
///
/// vals.scatter_select(&mut vec, mask, idxs); // index 0's second write is masked, thus omitted.
/// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
/// ```
#[inline]
pub fn scatter_select(
self,
slice: &mut [$type],
mask: crate::MaskSize<LANES>,
idxs: crate::SimdUsize<LANES>,
)
{
// We must construct our scatter mask before we derive a pointer!
let mask = (mask & idxs.lanes_lt(crate::SimdUsize::splat(slice.len()))).to_int();
// SAFETY: This block works with *mut T derived from &mut 'a [T],
// which means it is delicate in Rust's borrowing model, circa 2021:
// &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts!
// Even though this block is largely safe methods, it must be almost exactly this way
// to prevent invalidating the raw ptrs while they're live.
// Thus, entering this block requires every value it uses to already be ready:
// 0. idxs we want to write to, which are used to construct the mask.
// 1. mask, which depends on an initial &'a [T] and the idxs.
// 2. actual values to scatter (self).
// 3. &mut [T] which will become our base ptr.
unsafe {
// Now Entering ☢️ *mut T Zone
let base_ptr = crate::vector::ptr::SimdMutPtr::splat(slice.as_mut_ptr());
// Ferris forgive me, I have done pointer arithmetic here.
let ptrs = base_ptr.wrapping_add(idxs);
// The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
crate::intrinsics::simd_scatter(self, ptrs, mask)
// Cleared ☢️ *mut T Zone
}
}
}
impl<const LANES: usize> Copy for $name<LANES> where Self: crate::Vector {}
impl<const LANES: usize> Clone for $name<LANES> where Self: crate::Vector {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const LANES: usize> Default for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> Default for $name<LANES> where Self: crate::Vector {
#[inline]
fn default() -> Self {
Self::splat(<$type>::default())
}
}
impl<const LANES: usize> PartialEq for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> PartialEq for $name<LANES> where Self: crate::Vector {
#[inline]
fn eq(&self, other: &Self) -> bool {
// TODO use SIMD equality
@ -68,7 +204,7 @@ macro_rules! impl_vector {
}
}
impl<const LANES: usize> PartialOrd for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> PartialOrd for $name<LANES> where Self: crate::Vector {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
// TODO use SIMD equality
@ -77,14 +213,14 @@ macro_rules! impl_vector {
}
// array references
impl<const LANES: usize> AsRef<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> AsRef<[$type; LANES]> for $name<LANES> where Self: crate::Vector {
#[inline]
fn as_ref(&self) -> &[$type; LANES] {
&self.0
}
}
impl<const LANES: usize> AsMut<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> AsMut<[$type; LANES]> for $name<LANES> where Self: crate::Vector {
#[inline]
fn as_mut(&mut self) -> &mut [$type; LANES] {
&mut self.0
@ -92,14 +228,14 @@ macro_rules! impl_vector {
}
// slice references
impl<const LANES: usize> AsRef<[$type]> for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> AsRef<[$type]> for $name<LANES> where Self: crate::Vector {
#[inline]
fn as_ref(&self) -> &[$type] {
&self.0
}
}
impl<const LANES: usize> AsMut<[$type]> for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> AsMut<[$type]> for $name<LANES> where Self: crate::Vector {
#[inline]
fn as_mut(&mut self) -> &mut [$type] {
&mut self.0
@ -107,13 +243,13 @@ macro_rules! impl_vector {
}
// vector/array conversion
impl<const LANES: usize> From<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> From<[$type; LANES]> for $name<LANES> where Self: crate::Vector {
fn from(array: [$type; LANES]) -> Self {
Self(array)
}
}
impl <const LANES: usize> From<$name<LANES>> for [$type; LANES] where $name<LANES>: crate::LanesAtMost32 {
impl <const LANES: usize> From<$name<LANES>> for [$type; LANES] where $name<LANES>: crate::Vector {
fn from(vector: $name<LANES>) -> Self {
vector.to_array()
}
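These From impls make vectors and plain arrays interchangeable. A round trip through both directions (illustrative):

let arr = [1.0f32, 2.0, 3.0, 4.0];
let v = SimdF32::<4>::from(arr);
let back: [f32; 4] = v.into(); // goes through to_array()
assert_eq!(arr, back);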

View File

@ -35,7 +35,7 @@ macro_rules! impl_fmt_trait {
$( // repeat trait
impl<const LANES: usize> core::fmt::$trait for crate::$type<LANES>
where
Self: crate::LanesAtMost32,
Self: crate::Vector,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
$format(self.as_ref(), f)

View File

@ -2,7 +2,7 @@ macro_rules! impl_traits {
{ $type:ident } => {
impl<const LANES: usize> core::iter::Sum<Self> for crate::$type<LANES>
where
Self: crate::LanesAtMost32,
Self: crate::Vector,
{
fn sum<I: core::iter::Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(Default::default(), core::ops::Add::add)
@ -11,7 +11,7 @@ macro_rules! impl_traits {
impl<const LANES: usize> core::iter::Product<Self> for crate::$type<LANES>
where
Self: crate::LanesAtMost32,
Self: crate::Vector,
{
fn product<I: core::iter::Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(Default::default(), core::ops::Mul::mul)
@ -20,7 +20,7 @@ macro_rules! impl_traits {
impl<'a, const LANES: usize> core::iter::Sum<&'a Self> for crate::$type<LANES>
where
Self: crate::LanesAtMost32,
Self: crate::Vector,
{
fn sum<I: core::iter::Iterator<Item = &'a Self>>(iter: I) -> Self {
iter.fold(Default::default(), core::ops::Add::add)
@ -29,7 +29,7 @@ macro_rules! impl_traits {
impl<'a, const LANES: usize> core::iter::Product<&'a Self> for crate::$type<LANES>
where
Self: crate::LanesAtMost32,
Self: crate::Vector,
{
fn product<I: core::iter::Iterator<Item = &'a Self>>(iter: I) -> Self {
iter.fold(Default::default(), core::ops::Mul::mul)

View File

@ -1,54 +0,0 @@
/// Implemented for vector types supported by this crate.
pub trait LanesAtMost32: sealed::Sealed {
#[doc(hidden)]
type BitMask: Into<u64>;
}
mod sealed {
pub trait Sealed {}
}
macro_rules! impl_for {
{ $name:ident } => {
impl<const LANES: usize> sealed::Sealed for $name<LANES>
where
$name<LANES>: LanesAtMost32,
{}
impl LanesAtMost32 for $name<1> {
type BitMask = u8;
}
impl LanesAtMost32 for $name<2> {
type BitMask = u8;
}
impl LanesAtMost32 for $name<4> {
type BitMask = u8;
}
impl LanesAtMost32 for $name<8> {
type BitMask = u8;
}
impl LanesAtMost32 for $name<16> {
type BitMask = u16;
}
impl LanesAtMost32 for $name<32> {
type BitMask = u32;
}
}
}
use crate::*;
impl_for! { SimdU8 }
impl_for! { SimdU16 }
impl_for! { SimdU32 }
impl_for! { SimdU64 }
impl_for! { SimdUsize }
impl_for! { SimdI8 }
impl_for! { SimdI16 }
impl_for! { SimdI32 }
impl_for! { SimdI64 }
impl_for! { SimdIsize }
impl_for! { SimdF32 }
impl_for! { SimdF64 }
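Note the trait is only implemented for power-of-two lane counts up to 32, with BitMask sized to hold one bit per lane; anything else fails to compile. For example:

// OK: 8 is a supported lane count (BitMask = u8).
let v = SimdU8::<8>::splat(0);
// Compile error: no LanesAtMost32 impl exists for 3 lanes.
// let w = SimdU8::<3>::splat(0);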

View File

@ -35,14 +35,8 @@ mod vendor;
mod math;
mod lanes_at_most_32;
pub use lanes_at_most_32::LanesAtMost32;
mod masks;
pub use masks::*;
mod vector;
pub use vector::*;
mod array;
pub use array::SimdArray;

View File

@ -3,11 +3,11 @@ use core::marker::PhantomData;
/// Helper trait for limiting int conversion types
pub trait ConvertToInt {}
impl<const LANES: usize> ConvertToInt for crate::SimdI8<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> ConvertToInt for crate::SimdI16<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> ConvertToInt for crate::SimdI32<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> ConvertToInt for crate::SimdI64<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> ConvertToInt for crate::SimdIsize<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> ConvertToInt for crate::SimdI8<LANES> where Self: crate::Vector {}
impl<const LANES: usize> ConvertToInt for crate::SimdI16<LANES> where Self: crate::Vector {}
impl<const LANES: usize> ConvertToInt for crate::SimdI32<LANES> where Self: crate::Vector {}
impl<const LANES: usize> ConvertToInt for crate::SimdI64<LANES> where Self: crate::Vector {}
impl<const LANES: usize> ConvertToInt for crate::SimdIsize<LANES> where Self: crate::Vector {}
/// A mask where each lane is represented by a single bit.
#[repr(transparent)]
@ -80,7 +80,7 @@ impl<T: Mask, const LANES: usize> BitMask<T, LANES> {
#[inline]
pub unsafe fn from_int_unchecked<V>(value: V) -> Self
where
V: crate::LanesAtMost32,
V: crate::Vector,
{
// TODO remove the transmute when rustc is more flexible
assert_eq!(
@ -184,8 +184,8 @@ macro_rules! impl_from {
$(
impl<const LANES: usize> From<$from<crate::$from<LANES>, LANES>> for $to<crate::$to<LANES>, LANES>
where
crate::$from_inner<LANES>: crate::LanesAtMost32,
crate::$to_inner<LANES>: crate::LanesAtMost32,
crate::$from_inner<LANES>: crate::Vector,
crate::$to_inner<LANES>: crate::Vector,
crate::$from<LANES>: crate::Mask,
crate::$to<LANES>: crate::Mask,
{

View File

@ -14,18 +14,18 @@ macro_rules! define_mask {
#[repr(transparent)]
pub struct $name<T: Mask, const $lanes: usize>(crate::$type<$lanes2>, PhantomData<T>)
where
crate::$type<LANES>: crate::LanesAtMost32;
crate::$type<LANES>: crate::Vector;
impl_full_mask_reductions! { $name, $type }
impl<T: Mask, const LANES: usize> Copy for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{}
impl<T: Mask, const LANES: usize> Clone for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
#[inline]
fn clone(&self) -> Self {
@ -35,7 +35,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> PartialEq for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
@ -44,7 +44,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> PartialOrd for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
@ -53,12 +53,12 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> Eq for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{}
impl<T: Mask, const LANES: usize> Ord for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.cmp(&other.0)
@ -67,7 +67,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
pub fn splat(value: bool) -> Self {
Self(
@ -154,7 +154,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> core::convert::From<$name<T, LANES>> for crate::$type<LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
fn from(value: $name<T, LANES>) -> Self {
value.0
@ -163,7 +163,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> core::ops::BitAnd for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
type Output = Self;
#[inline]
@ -174,7 +174,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> core::ops::BitOr for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
type Output = Self;
#[inline]
@ -185,7 +185,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> core::ops::BitXor for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
type Output = Self;
#[inline]
@ -196,7 +196,7 @@ macro_rules! define_mask {
impl<T: Mask, const LANES: usize> core::ops::Not for $name<T, LANES>
where
crate::$type<LANES>: crate::LanesAtMost32,
crate::$type<LANES>: crate::Vector,
{
type Output = Self;
#[inline]
@ -242,8 +242,8 @@ macro_rules! impl_from {
$(
impl<const LANES: usize, T, U> From<$from<T, LANES>> for $to<U, LANES>
where
crate::$from_inner<LANES>: crate::LanesAtMost32,
crate::$to_inner<LANES>: crate::LanesAtMost32,
crate::$from_inner<LANES>: crate::Vector,
crate::$to_inner<LANES>: crate::Vector,
T: crate::Mask,
U: crate::Mask,
{

View File

@ -12,7 +12,7 @@
)]
mod mask_impl;
use crate::{LanesAtMost32, SimdI16, SimdI32, SimdI64, SimdI8, SimdIsize};
use crate::{SimdI16, SimdI32, SimdI64, SimdI8, SimdIsize, Vector};
mod sealed {
pub trait Sealed {}
@ -38,12 +38,12 @@ macro_rules! define_opaque_mask {
#[allow(non_camel_case_types)]
pub struct $name<const LANES: usize>($inner_ty)
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask;
impl<const LANES: usize> sealed::Sealed for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{}
impl Mask for $name<1> {
@ -75,7 +75,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
/// Construct a mask by setting all lanes to the given value.
@ -188,7 +188,7 @@ macro_rules! define_opaque_mask {
// vector/array conversion
impl<const LANES: usize> From<[bool; LANES]> for $name<LANES>
where
$bits_ty<LANES>: crate::LanesAtMost32,
$bits_ty<LANES>: crate::Vector,
Self: Mask,
{
fn from(array: [bool; LANES]) -> Self {
@ -198,7 +198,7 @@ macro_rules! define_opaque_mask {
impl <const LANES: usize> From<$name<LANES>> for [bool; LANES]
where
$bits_ty<LANES>: crate::LanesAtMost32,
$bits_ty<LANES>: crate::Vector,
$name<LANES>: Mask,
{
fn from(vector: $name<LANES>) -> Self {
@ -208,13 +208,13 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> Copy for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{}
impl<const LANES: usize> Clone for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -225,7 +225,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> Default for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -236,7 +236,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> PartialEq for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -247,7 +247,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> PartialOrd for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -258,7 +258,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::fmt::Debug for $name<LANES>
where
$bits_ty<LANES>: crate::LanesAtMost32,
$bits_ty<LANES>: crate::Vector,
Self: Mask,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
@ -270,7 +270,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = Self;
@ -282,7 +282,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = Self;
@ -294,7 +294,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
$name<LANES>: Mask,
{
type Output = $name<LANES>;
@ -306,7 +306,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = Self;
@ -318,7 +318,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = Self;
@ -330,7 +330,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
$name<LANES>: Mask,
{
type Output = $name<LANES>;
@ -342,7 +342,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = Self;
@ -354,7 +354,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = Self;
@ -366,7 +366,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
$name<LANES>: Mask,
{
type Output = $name<LANES>;
@ -378,7 +378,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
type Output = $name<LANES>;
@ -390,7 +390,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -401,7 +401,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -412,7 +412,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -423,7 +423,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -434,7 +434,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -445,7 +445,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES>
where
$bits_ty<LANES>: LanesAtMost32,
$bits_ty<LANES>: Vector,
Self: Mask,
{
#[inline]
@ -555,8 +555,8 @@ macro_rules! impl_from {
$(
impl<const LANES: usize> From<$from<LANES>> for $to<LANES>
where
crate::$from_inner<LANES>: crate::LanesAtMost32,
crate::$to_inner<LANES>: crate::LanesAtMost32,
crate::$from_inner<LANES>: crate::Vector,
crate::$to_inner<LANES>: crate::Vector,
$from<LANES>: Mask,
Self: Mask,
{
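The impl_from block generates conversions between opaque masks of different element widths. A hedged sketch, assuming Mask8 -> Mask32 is among the generated pairs:

let m8 = Mask8::<4>::from([true, false, true, false]);
let m32 = Mask32::<4>::from(m8); // lossless: each lane keeps its truth value
assert_eq!(<[bool; 4]>::from(m32), [true, false, true, false]);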

View File

@ -1,6 +1,6 @@
macro_rules! impl_uint_arith {
($(($name:ident, $n:ident)),+) => {
$( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
$( impl<const LANES: usize> $name<LANES> where Self: crate::Vector {
/// Lanewise saturating add.
///
@ -44,7 +44,7 @@ macro_rules! impl_uint_arith {
macro_rules! impl_int_arith {
($(($name:ident, $n:ident)),+) => {
$( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
$( impl<const LANES: usize> $name<LANES> where Self: crate::Vector {
/// Lanewise saturating add.
///
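Assuming the generated methods are named saturating_add and so on, as the doc comments suggest, the behavior clamps at the type bounds instead of wrapping:

let x = SimdU8::<4>::splat(250);
assert_eq!(x.saturating_add(SimdU8::splat(10)), SimdU8::splat(255)); // clamps at u8::MAX
let y = SimdI8::<4>::splat(-125);
assert_eq!(y.saturating_add(SimdI8::splat(-10)), SimdI8::splat(-128)); // clamps at i8::MIN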

View File

@ -1,4 +1,4 @@
use crate::LanesAtMost32;
use crate::Vector;
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
fn invalid_shift_rhs<T>(rhs: T) -> bool
@ -16,7 +16,7 @@ macro_rules! impl_ref_ops {
{
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
where
$($bound:path: LanesAtMost32,)*
$($bound:path: Vector,)*
{
type Output = $output:ty;
@ -26,7 +26,7 @@ macro_rules! impl_ref_ops {
} => {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
type Output = $output;
@ -36,7 +36,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
@ -48,7 +48,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
@ -60,7 +60,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
@ -75,7 +75,7 @@ macro_rules! impl_ref_ops {
{
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
where
$($bound:path: LanesAtMost32,)*
$($bound:path: Vector,)*
{
$(#[$attrs:meta])*
fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
@ -83,7 +83,7 @@ macro_rules! impl_ref_ops {
} => {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
@ -91,7 +91,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
@ -104,7 +104,7 @@ macro_rules! impl_ref_ops {
{
impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty
where
$($bound:path: LanesAtMost32,)*
$($bound:path: Vector,)*
{
type Output = $output:ty;
fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
@ -112,7 +112,7 @@ macro_rules! impl_ref_ops {
} => {
impl<const $lanes: usize> core::ops::$trait for $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
type Output = $output;
fn $fn($self_tok) -> Self::Output $body
@ -120,7 +120,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait for &'_ $type
where
$($bound: LanesAtMost32,)*
$($bound: Vector,)*
{
type Output = <$type as core::ops::$trait>::Output;
fn $fn($self_tok) -> Self::Output {
@ -167,7 +167,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Not for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
type Output = Self;
fn not(self) -> Self::Output {
@ -181,7 +181,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
type Output = Self;
fn neg(self) -> Self::Output {
@ -194,7 +194,7 @@ macro_rules! impl_op {
{ impl Index for $type:ident, $scalar:ty } => {
impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
where
Self: LanesAtMost32,
Self: Vector,
I: core::slice::SliceIndex<[$scalar]>,
{
type Output = I::Output;
@ -206,7 +206,7 @@ macro_rules! impl_op {
impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
where
Self: LanesAtMost32,
Self: Vector,
I: core::slice::SliceIndex<[$scalar]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
@ -221,7 +221,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<Self> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
type Output = Self;
@ -237,7 +237,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<$scalar> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
type Output = Self;
@ -251,7 +251,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<crate::$type<LANES>> for $scalar
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
type Output = crate::$type<LANES>;
@ -265,7 +265,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$assign_trait<Self> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
#[inline]
fn $assign_trait_fn(&mut self, rhs: Self) {
@ -279,7 +279,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$assign_trait<$scalar> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost32,
crate::$type<LANES>: Vector,
{
#[inline]
fn $assign_trait_fn(&mut self, rhs: $scalar) {
@ -325,7 +325,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -353,7 +353,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -376,7 +376,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<crate::$vector<LANES>> for $scalar
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = crate::$vector<LANES>;
@ -390,7 +390,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::DivAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn div_assign(&mut self, rhs: Self) {
@ -402,7 +402,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::DivAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn div_assign(&mut self, rhs: $scalar) {
@ -415,7 +415,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -443,7 +443,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -466,7 +466,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<crate::$vector<LANES>> for $scalar
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = crate::$vector<LANES>;
@ -480,7 +480,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::RemAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn rem_assign(&mut self, rhs: Self) {
@ -492,7 +492,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::RemAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn rem_assign(&mut self, rhs: $scalar) {
@ -505,7 +505,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shl<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -527,7 +527,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shl<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -546,7 +546,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShlAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn shl_assign(&mut self, rhs: Self) {
@ -558,7 +558,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShlAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn shl_assign(&mut self, rhs: $scalar) {
@ -570,7 +570,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shr<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -592,7 +592,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shr<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
type Output = Self;
@ -611,7 +611,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShrAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn shr_assign(&mut self, rhs: Self) {
@ -623,7 +623,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShrAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost32,
crate::$vector<LANES>: Vector,
{
#[inline]
fn shr_assign(&mut self, rhs: $scalar) {
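The impl_ref_ops wrappers forward each operator to every value/reference combination, and the vector-scalar impls let a plain scalar appear on either side. Illustrative consequences:

let a = SimdI32::<4>::splat(6);
let b = SimdI32::<4>::splat(2);
assert_eq!(&a + &b, a + b); // &T op &T forwards to T op T
assert_eq!(a << 1, SimdI32::splat(12)); // vector << scalar is also generated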

View File

@ -2,7 +2,7 @@ macro_rules! impl_integer_reductions {
{ $name:ident, $scalar:ty } => {
impl<const LANES: usize> crate::$name<LANES>
where
Self: crate::LanesAtMost32
Self: crate::Vector
{
/// Horizontal wrapping add. Returns the sum of the lanes of the vector, with wrapping addition.
#[inline]
@ -56,7 +56,7 @@ macro_rules! impl_float_reductions {
{ $name:ident, $scalar:ty } => {
impl<const LANES: usize> crate::$name<LANES>
where
Self: crate::LanesAtMost32
Self: crate::Vector
{
/// Horizontal add. Returns the sum of the lanes of the vector.
@ -106,7 +106,7 @@ macro_rules! impl_full_mask_reductions {
{ $name:ident, $bits_ty:ident } => {
impl<T: crate::Mask, const LANES: usize> $name<T, LANES>
where
crate::$bits_ty<LANES>: crate::LanesAtMost32
crate::$bits_ty<LANES>: crate::Vector
{
#[inline]
pub fn any(self) -> bool {
@ -125,7 +125,7 @@ macro_rules! impl_opaque_mask_reductions {
{ $name:ident, $bits_ty:ident } => {
impl<const LANES: usize> $name<LANES>
where
crate::$bits_ty<LANES>: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::Vector,
$name<LANES>: crate::Mask,
{
/// Returns true if any lane is set, or false otherwise.
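Two illustrative checks, using only APIs visible in this commit (to_array, splat, the From<[bool; N]> conversion, and any); the horizontal wrapping sum is equivalent to a scalar wrapping fold over the lanes:

let m = Mask32::<4>::from([false, true, false, false]);
assert!(m.any());
// Horizontal wrapping add over u8 lanes: 4 * 200 = 800, and 800 mod 256 = 32.
let v = SimdU8::<4>::splat(200);
let sum = v.to_array().iter().fold(0u8, |acc, &x| acc.wrapping_add(x));
assert_eq!(sum, 32);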

View File

@ -5,7 +5,7 @@ macro_rules! implement {
#[cfg(feature = "std")]
impl<const LANES: usize> crate::$type<LANES>
where
Self: crate::LanesAtMost32,
Self: crate::Vector,
{
/// Returns the smallest integer greater than or equal to each lane.
#[must_use = "method returns a new vector and does not mutate the original value"]
@ -45,8 +45,8 @@ macro_rules! implement {
impl<const LANES: usize> crate::$type<LANES>
where
Self: crate::LanesAtMost32,
crate::$int_type<LANES>: crate::LanesAtMost32,
Self: crate::Vector,
crate::$int_type<LANES>: crate::Vector,
{
/// Rounds toward zero and converts to the same-width integer type, assuming that
/// the value is finite and fits in that type.

View File

@ -14,12 +14,12 @@ macro_rules! impl_select {
$mask:ident ($bits_ty:ident): $($type:ident),*
} => {
$(
impl<const LANES: usize> Sealed for crate::$type<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> Sealed for crate::$type<LANES> where Self: crate::Vector {}
impl<const LANES: usize> Select<crate::$mask<LANES>> for crate::$type<LANES>
where
crate::$mask<LANES>: crate::Mask,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
Self: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::Vector,
Self: crate::Vector,
{
#[doc(hidden)]
#[inline]
@ -32,12 +32,12 @@ macro_rules! impl_select {
impl<const LANES: usize> Sealed for crate::$mask<LANES>
where
Self: crate::Mask,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::Vector,
{}
impl<const LANES: usize> Select<Self> for crate::$mask<LANES>
where
Self: crate::Mask,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::Vector,
{
#[doc(hidden)]
#[inline]
@ -49,7 +49,7 @@ macro_rules! impl_select {
impl<const LANES: usize> crate::$mask<LANES>
where
Self: crate::Mask,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::Vector,
{
/// Choose lanes from two vectors.
///
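The hunk ends mid-doc; the method being documented is the mask's select, which picks each lane from one of two vectors depending on the mask. A hedged usage sketch:

let mask = Mask32::<4>::from([true, false, true, false]);
let a = SimdI32::<4>::from_array([1, 2, 3, 4]);
let b = SimdI32::<4>::from_array([-1, -2, -3, -4]);
assert_eq!(mask.select(a, b), SimdI32::from_array([1, -2, 3, -4]));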

View File

@ -18,11 +18,11 @@ pub trait ToBytes: Sealed {
macro_rules! impl_to_bytes {
{ $name:ident, $($int_width:literal -> $byte_width:literal),* } => {
$(
impl Sealed for crate::$name<$int_width> where Self: crate::LanesAtMost32 {}
impl Sealed for crate::$name<$int_width> where Self: crate::Vector {}
impl ToBytes for crate::$name<$int_width>
where
Self: crate::LanesAtMost32,
crate::SimdU8<$byte_width>: crate::LanesAtMost32,
Self: crate::Vector,
crate::SimdU8<$byte_width>: crate::Vector,
{
type Bytes = crate::SimdU8<$byte_width>;
fn to_bytes_impl(self) -> Self::Bytes {
@ -36,7 +36,7 @@ macro_rules! impl_to_bytes {
impl<const LANES: usize> crate::$name<LANES>
where
Self: ToBytes + crate::LanesAtMost32,
Self: ToBytes + crate::Vector,
{
/// Return the memory representation of this integer as a byte array in native byte
/// order.
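Assuming the public method mirrors the scalar API and is named to_ne_bytes, a 4-lane u32 vector flattens into a 16-lane u8 vector in host byte order:

let v = SimdU32::<4>::splat(1);
let bytes: SimdU8<16> = v.to_ne_bytes(); // Bytes = SimdU8<4 * 4> per the impl above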

View File

@ -1,10 +0,0 @@
mod float;
mod int;
mod uint;
pub use float::*;
pub use int::*;
pub use uint::*;
// Vectors of pointers are not for public use at the current time.
pub(crate) mod ptr;

View File

@ -10,8 +10,8 @@ macro_rules! impl_float_vector {
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
Self: crate::Vector,
crate::$bits_ty<LANES>: crate::Vector,
{
/// Raw transmutation to an unsigned integer vector type with the
/// same size and number of lanes.
@ -78,9 +78,9 @@ macro_rules! impl_float_vector {
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
crate::$mask_impl_ty<LANES>: crate::LanesAtMost32,
Self: crate::Vector,
crate::$bits_ty<LANES>: crate::Vector,
crate::$mask_impl_ty<LANES>: crate::Vector,
crate::$mask_ty<LANES>: crate::Mask,
{
/// Returns true for each lane if it has a positive sign, including
@ -197,7 +197,7 @@ macro_rules! impl_float_vector {
#[repr(simd)]
pub struct SimdF32<const LANES: usize>([f32; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_float_vector! { SimdF32, f32, SimdU32, Mask32, SimdI32 }
@ -205,7 +205,7 @@ impl_float_vector! { SimdF32, f32, SimdU32, Mask32, SimdI32 }
#[repr(simd)]
pub struct SimdF64<const LANES: usize>([f64; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_float_vector! { SimdF64, f64, SimdU64, Mask64, SimdI64 }
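The "raw transmutation" accessor pairs each float vector with the unsigned integer vector of the same width, presumably via to_bits/from_bits mirroring f32::to_bits. Sketch:

let ones = SimdF32::<4>::splat(1.0);
assert_eq!(ones.to_bits(), SimdU32::splat(0x3f80_0000)); // IEEE 754 bit pattern of 1.0f32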

View File

@ -6,9 +6,9 @@ macro_rules! impl_integer_vector {
impl_vector! { $name, $type }
impl_integer_reductions! { $name, $type }
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::Vector {}
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::Vector {
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
// TODO use SIMD cmp
@ -16,7 +16,7 @@ macro_rules! impl_integer_vector {
}
}
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::Vector {
#[inline]
fn hash<H>(&self, state: &mut H)
where
@ -28,8 +28,8 @@ macro_rules! impl_integer_vector {
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost32,
crate::$mask_impl_ty<LANES>: crate::LanesAtMost32,
Self: crate::Vector,
crate::$mask_impl_ty<LANES>: crate::Vector,
crate::$mask_ty<LANES>: crate::Mask,
{
/// Returns true for each positive lane and false if it is zero or negative.
@ -63,7 +63,7 @@ macro_rules! impl_integer_vector {
#[repr(simd)]
pub struct SimdIsize<const LANES: usize>([isize; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_integer_vector! { SimdIsize, isize, MaskSize, SimdIsize }
@ -71,7 +71,7 @@ impl_integer_vector! { SimdIsize, isize, MaskSize, SimdIsize }
#[repr(simd)]
pub struct SimdI16<const LANES: usize>([i16; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_integer_vector! { SimdI16, i16, Mask16, SimdI16 }
@ -79,7 +79,7 @@ impl_integer_vector! { SimdI16, i16, Mask16, SimdI16 }
#[repr(simd)]
pub struct SimdI32<const LANES: usize>([i32; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_integer_vector! { SimdI32, i32, Mask32, SimdI32 }
@ -87,7 +87,7 @@ impl_integer_vector! { SimdI32, i32, Mask32, SimdI32 }
#[repr(simd)]
pub struct SimdI64<const LANES: usize>([i64; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_integer_vector! { SimdI64, i64, Mask64, SimdI64 }
@ -95,7 +95,7 @@ impl_integer_vector! { SimdI64, i64, Mask64, SimdI64 }
#[repr(simd)]
pub struct SimdI8<const LANES: usize>([i8; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_integer_vector! { SimdI8, i8, Mask8, SimdI8 }

View File

@ -0,0 +1,132 @@
mod float;
mod int;
mod uint;
pub use float::*;
pub use int::*;
pub use uint::*;
// Vectors of pointers are not for public use at the current time.
pub(crate) mod ptr;
mod sealed {
pub trait Sealed {}
}
/// A representation of a vector as an "array" with indices, implementing
/// operations applicable to any vector type based solely on "having lanes",
/// and describing relationships between vector and scalar types.
pub trait Vector: sealed::Sealed {
/// The scalar type in every lane of this vector type.
type Scalar: Copy + Sized;
/// The number of lanes for this vector.
const LANES: usize;
// Implementation detail until the compiler can support bitmasks of any integer width
#[doc(hidden)]
type BitMask: Into<u64>;
/// Generates a SIMD vector with the same value in every lane.
#[must_use]
fn splat(val: Self::Scalar) -> Self;
}
macro_rules! impl_vector_for {
($simd:ident {type Scalar = $scalar:ident;}) => {
impl_vector_for! { $simd<1> { type Scalar = $scalar; type BitMask = u8; } }
impl_vector_for! { $simd<2> { type Scalar = $scalar; type BitMask = u8; } }
impl_vector_for! { $simd<4> { type Scalar = $scalar; type BitMask = u8; } }
impl_vector_for! { $simd<8> { type Scalar = $scalar; type BitMask = u8; } }
impl_vector_for! { $simd<16> { type Scalar = $scalar; type BitMask = u16; } }
impl_vector_for! { $simd<32> { type Scalar = $scalar; type BitMask = u32; } }
};
($simd:ident<$lanes:literal> {type Scalar = $scalar:ident; type BitMask = $bitmask:ident; }) => {
impl sealed::Sealed for $simd<$lanes> {}
impl Vector for $simd<$lanes> {
type Scalar = $scalar;
const LANES: usize = $lanes;
type BitMask = $bitmask;
#[inline]
fn splat(val: Self::Scalar) -> Self {
[val; $lanes].into()
}
}
};
}
impl_vector_for! {
SimdUsize {
type Scalar = usize;
}
}
impl_vector_for! {
SimdIsize {
type Scalar = isize;
}
}
impl_vector_for! {
SimdI8 {
type Scalar = i8;
}
}
impl_vector_for! {
SimdI16 {
type Scalar = i16;
}
}
impl_vector_for! {
SimdI32 {
type Scalar = i32;
}
}
impl_vector_for! {
SimdI64 {
type Scalar = i64;
}
}
impl_vector_for! {
SimdU8 {
type Scalar = u8;
}
}
impl_vector_for! {
SimdU16 {
type Scalar = u16;
}
}
impl_vector_for! {
SimdU32 {
type Scalar = u32;
}
}
impl_vector_for! {
SimdU64 {
type Scalar = u64;
}
}
impl_vector_for! {
SimdF32 {
type Scalar = f32;
}
}
impl_vector_for! {
SimdF64 {
type Scalar = f64;
}
}
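With Scalar, LANES, and the bitmask width all hanging off one trait, downstream code can be generic over any supported vector with a single bound. A minimal sketch (zeroed is a hypothetical helper, not part of this commit):

use core_simd::Vector;

fn zeroed<V: Vector>() -> V
where
    V::Scalar: Default,
{
    V::splat(V::Scalar::default())
}

// let v: core_simd::SimdF32<8> = zeroed();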

View File

@ -9,7 +9,7 @@ pub(crate) struct SimdConstPtr<T, const LANES: usize>([*const T; LANES]);
impl<T, const LANES: usize> SimdConstPtr<T, LANES>
where
SimdUsize<LANES>: crate::LanesAtMost32,
SimdUsize<LANES>: crate::Vector,
T: Sized,
{
#[inline]
@ -35,7 +35,7 @@ pub(crate) struct SimdMutPtr<T, const LANES: usize>([*mut T; LANES]);
impl<T, const LANES: usize> SimdMutPtr<T, LANES>
where
SimdUsize<LANES>: crate::LanesAtMost32,
SimdUsize<LANES>: crate::Vector,
T: Sized,
{
#[inline]

View File

@ -6,9 +6,9 @@ macro_rules! impl_unsigned_vector {
impl_vector! { $name, $type }
impl_integer_reductions! { $name, $type }
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::Vector {}
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::Vector {
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
// TODO use SIMD cmp
@ -16,7 +16,7 @@ macro_rules! impl_unsigned_vector {
}
}
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost32 {
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::Vector {
#[inline]
fn hash<H>(&self, state: &mut H)
where
@ -32,7 +32,7 @@ macro_rules! impl_unsigned_vector {
#[repr(simd)]
pub struct SimdUsize<const LANES: usize>([usize; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_unsigned_vector! { SimdUsize, usize }
@ -40,7 +40,7 @@ impl_unsigned_vector! { SimdUsize, usize }
#[repr(simd)]
pub struct SimdU16<const LANES: usize>([u16; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_unsigned_vector! { SimdU16, u16 }
@ -48,7 +48,7 @@ impl_unsigned_vector! { SimdU16, u16 }
#[repr(simd)]
pub struct SimdU32<const LANES: usize>([u32; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_unsigned_vector! { SimdU32, u32 }
@ -56,7 +56,7 @@ impl_unsigned_vector! { SimdU32, u32 }
#[repr(simd)]
pub struct SimdU64<const LANES: usize>([u64; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_unsigned_vector! { SimdU64, u64 }
@ -64,7 +64,7 @@ impl_unsigned_vector! { SimdU64, u64 }
#[repr(simd)]
pub struct SimdU8<const LANES: usize>([u8; LANES])
where
Self: crate::LanesAtMost32;
Self: crate::Vector;
impl_unsigned_vector! { SimdU8, u8 }

View File

@ -335,18 +335,18 @@ macro_rules! test_lanes {
fn implementation<const $lanes: usize>()
where
core_simd::SimdU8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU8<$lanes>: core_simd::Vector,
core_simd::SimdU16<$lanes>: core_simd::Vector,
core_simd::SimdU32<$lanes>: core_simd::Vector,
core_simd::SimdU64<$lanes>: core_simd::Vector,
core_simd::SimdUsize<$lanes>: core_simd::Vector,
core_simd::SimdI8<$lanes>: core_simd::Vector,
core_simd::SimdI16<$lanes>: core_simd::Vector,
core_simd::SimdI32<$lanes>: core_simd::Vector,
core_simd::SimdI64<$lanes>: core_simd::Vector,
core_simd::SimdIsize<$lanes>: core_simd::Vector,
core_simd::SimdF32<$lanes>: core_simd::Vector,
core_simd::SimdF64<$lanes>: core_simd::Vector,
core_simd::Mask8<$lanes>: core_simd::Mask,
core_simd::Mask16<$lanes>: core_simd::Mask,
core_simd::Mask32<$lanes>: core_simd::Mask,
@ -409,18 +409,18 @@ macro_rules! test_lanes_panic {
fn implementation<const $lanes: usize>()
where
core_simd::SimdU8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU8<$lanes>: core_simd::Vector,
core_simd::SimdU16<$lanes>: core_simd::Vector,
core_simd::SimdU32<$lanes>: core_simd::Vector,
core_simd::SimdU64<$lanes>: core_simd::Vector,
core_simd::SimdUsize<$lanes>: core_simd::Vector,
core_simd::SimdI8<$lanes>: core_simd::Vector,
core_simd::SimdI16<$lanes>: core_simd::Vector,
core_simd::SimdI32<$lanes>: core_simd::Vector,
core_simd::SimdI64<$lanes>: core_simd::Vector,
core_simd::SimdIsize<$lanes>: core_simd::Vector,
core_simd::SimdF32<$lanes>: core_simd::Vector,
core_simd::SimdF64<$lanes>: core_simd::Vector,
core_simd::Mask8<$lanes>: core_simd::Mask,
core_simd::Mask16<$lanes>: core_simd::Mask,
core_simd::Mask32<$lanes>: core_simd::Mask,