Merge pull request #278 from rust-lang/feature/simd-traits

Move element-specific functions to traits
Caleb Zulawski authored on 2022-05-31 14:26:30 -04:00, committed by GitHub
commit c44a608f5f
10 changed files with 777 additions and 681 deletions
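
The user-facing consequence of this PR: operations such as `reduce_sum`, `saturating_add`, and `is_nan` move from inherent methods on `Simd<T, N>` to methods on the new `SimdFloat`, `SimdInt`, and `SimdUint` traits, so callers now have to import the trait. A minimal sketch of the new calling convention (illustrative only; it assumes the `core::simd` re-exports added below):

```
#![feature(portable_simd)]
use core::simd::{f32x4, SimdFloat}; // the trait must now be in scope

fn total(v: f32x4) -> f32 {
    // previously an inherent method on Simd<f32, N>; now a SimdFloat trait method
    v.reduce_sum()
}
```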

@@ -0,0 +1,11 @@
mod float;
mod int;
mod uint;
mod sealed {
pub trait Sealed {}
}
pub use float::*;
pub use int::*;
pub use uint::*;
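
The private `sealed` module is the standard sealed-trait pattern: downstream crates can see and call `SimdFloat`, `SimdInt`, and `SimdUint`, but cannot implement them, because the required `Sealed` supertrait lives in a private module. A generic sketch of the pattern (`MyTrait` is hypothetical, not part of this crate):

```
mod sealed {
    pub trait Sealed {} // public trait in a private module
}

// External crates can call MyTrait methods but cannot write
// `impl MyTrait for TheirType`, since that would require naming sealed::Sealed.
pub trait MyTrait: sealed::Sealed {
    fn frob(&self);
}

impl sealed::Sealed for u32 {}
impl MyTrait for u32 {
    fn frob(&self) {}
}
```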

@@ -0,0 +1,344 @@
use super::sealed::Sealed;
use crate::simd::{
intrinsics, LaneCount, Mask, Simd, SimdElement, SimdPartialEq, SimdPartialOrd,
SupportedLaneCount,
};
/// Operations on SIMD vectors of floats.
pub trait SimdFloat: Copy + Sealed {
/// Mask type used for manipulating this SIMD vector type.
type Mask;
/// Scalar type contained by this SIMD vector type.
type Scalar;
/// Bit representation of this SIMD vector type.
type Bits;
/// Raw transmutation to an unsigned integer vector type with the
/// same size and number of lanes.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn to_bits(self) -> Self::Bits;
/// Raw transmutation from an unsigned integer vector type with the
/// same size and number of lanes.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn from_bits(bits: Self::Bits) -> Self;
/// Produces a vector where every lane has the absolute value of the
/// equivalently-indexed lane in `self`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn abs(self) -> Self;
/// Takes the reciprocal (inverse) of each lane, `1/x`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn recip(self) -> Self;
/// Converts each lane from radians to degrees.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn to_degrees(self) -> Self;
/// Converts each lane from degrees to radians.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn to_radians(self) -> Self;
/// Returns true for each lane if it has a positive sign, including
/// `+0.0`, `NaN`s with positive sign bit and positive infinity.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_sign_positive(self) -> Self::Mask;
/// Returns true for each lane if it has a negative sign, including
/// `-0.0`, `NaN`s with negative sign bit and negative infinity.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_sign_negative(self) -> Self::Mask;
/// Returns true for each lane if its value is `NaN`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_nan(self) -> Self::Mask;
/// Returns true for each lane if its value is positive infinity or negative infinity.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_infinite(self) -> Self::Mask;
/// Returns true for each lane if its value is neither infinite nor `NaN`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_finite(self) -> Self::Mask;
/// Returns true for each lane if its value is subnormal.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_subnormal(self) -> Self::Mask;
/// Returns true for each lane if its value is neither zero, infinite,
/// subnormal, nor `NaN`.
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_normal(self) -> Self::Mask;
/// Replaces each lane with a number that represents its sign.
///
/// * `1.0` if the number is positive, `+0.0`, or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0`, or `NEG_INFINITY`
/// * `NAN` if the number is `NAN`
#[must_use = "method returns a new vector and does not mutate the original value"]
fn signum(self) -> Self;
/// Returns each lane with the magnitude of `self` and the sign of `sign`.
///
/// For any lane containing a `NAN`, a `NAN` with the sign of `sign` is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn copysign(self, sign: Self) -> Self;
/// Returns the minimum of each lane.
///
/// If one of the values is `NAN`, then the other value is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_min(self, other: Self) -> Self;
/// Returns the maximum of each lane.
///
/// If one of the values is `NAN`, then the other value is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_max(self, other: Self) -> Self;
/// Restrict each lane to a certain interval unless it is NaN.
///
/// For each lane in `self`, returns the corresponding lane in `max` if the lane is
/// greater than `max`, and the corresponding lane in `min` if the lane is less
/// than `min`. Otherwise returns the lane in `self`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_clamp(self, min: Self, max: Self) -> Self;
/// Returns the sum of the lanes of the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{f32x2, SimdFloat};
/// let v = f32x2::from_array([1., 2.]);
/// assert_eq!(v.reduce_sum(), 3.);
/// ```
fn reduce_sum(self) -> Self::Scalar;
/// Returns the product of the lanes of the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{f32x2, SimdFloat};
/// let v = f32x2::from_array([3., 4.]);
/// assert_eq!(v.reduce_product(), 12.);
/// ```
fn reduce_product(self) -> Self::Scalar;
/// Returns the maximum lane in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either.
///
/// This function will not return `NaN` unless all lanes are `NaN`.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{f32x2, SimdFloat};
/// let v = f32x2::from_array([1., 2.]);
/// assert_eq!(v.reduce_max(), 2.);
///
/// // NaN values are skipped...
/// let v = f32x2::from_array([1., f32::NAN]);
/// assert_eq!(v.reduce_max(), 1.);
///
/// // ...unless all values are NaN
/// let v = f32x2::from_array([f32::NAN, f32::NAN]);
/// assert!(v.reduce_max().is_nan());
/// ```
fn reduce_max(self) -> Self::Scalar;
/// Returns the minimum lane in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either.
///
/// This function will not return `NaN` unless all lanes are `NaN`.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{f32x2, SimdFloat};
/// let v = f32x2::from_array([3., 7.]);
/// assert_eq!(v.reduce_min(), 3.);
///
/// // NaN values are skipped...
/// let v = f32x2::from_array([1., f32::NAN]);
/// assert_eq!(v.reduce_min(), 1.);
///
/// // ...unless all values are NaN
/// let v = f32x2::from_array([f32::NAN, f32::NAN]);
/// assert!(v.reduce_min().is_nan());
/// ```
fn reduce_min(self) -> Self::Scalar;
}
macro_rules! impl_trait {
{ $($ty:ty { bits: $bits_ty:ty, mask: $mask_ty:ty }),* } => {
$(
impl<const LANES: usize> Sealed for Simd<$ty, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
}
impl<const LANES: usize> SimdFloat for Simd<$ty, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
type Mask = Mask<<$mask_ty as SimdElement>::Mask, LANES>;
type Scalar = $ty;
type Bits = Simd<$bits_ty, LANES>;
#[inline]
fn to_bits(self) -> Simd<$bits_ty, LANES> {
assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Self::Bits>());
unsafe { core::mem::transmute_copy(&self) }
}
#[inline]
fn from_bits(bits: Simd<$bits_ty, LANES>) -> Self {
assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Self::Bits>());
unsafe { core::mem::transmute_copy(&bits) }
}
#[inline]
fn abs(self) -> Self {
unsafe { intrinsics::simd_fabs(self) }
}
#[inline]
fn recip(self) -> Self {
Self::splat(1.0) / self
}
#[inline]
fn to_degrees(self) -> Self {
// to_degrees uses a special constant for better precision, so extract that constant
self * Self::splat(Self::Scalar::to_degrees(1.))
}
#[inline]
fn to_radians(self) -> Self {
self * Self::splat(Self::Scalar::to_radians(1.))
}
#[inline]
fn is_sign_positive(self) -> Self::Mask {
!self.is_sign_negative()
}
#[inline]
fn is_sign_negative(self) -> Self::Mask {
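// (!0 >> 1) + 1 has only the most significant bit set, i.e. the IEEE 754 sign bit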
let sign_bits = self.to_bits() & Simd::splat((!0 >> 1) + 1);
sign_bits.simd_gt(Simd::splat(0))
}
#[inline]
fn is_nan(self) -> Self::Mask {
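// NaN is the only value that is not equal to itself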
self.simd_ne(self)
}
#[inline]
fn is_infinite(self) -> Self::Mask {
self.abs().simd_eq(Self::splat(Self::Scalar::INFINITY))
}
#[inline]
fn is_finite(self) -> Self::Mask {
self.abs().simd_lt(Self::splat(Self::Scalar::INFINITY))
}
#[inline]
fn is_subnormal(self) -> Self::Mask {
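// subnormal: nonzero magnitude with all exponent bits (the bits set in INFINITY) clear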
self.abs().simd_ne(Self::splat(0.0)) & (self.to_bits() & Self::splat(Self::Scalar::INFINITY).to_bits()).simd_eq(Simd::splat(0))
}
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn is_normal(self) -> Self::Mask {
!(self.abs().simd_eq(Self::splat(0.0)) | self.is_nan() | self.is_subnormal() | self.is_infinite())
}
#[inline]
fn signum(self) -> Self {
self.is_nan().select(Self::splat(Self::Scalar::NAN), Self::splat(1.0).copysign(self))
}
#[inline]
fn copysign(self, sign: Self) -> Self {
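// -0.0 has only the sign bit set, so its bit pattern doubles as a sign mask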
let sign_bit = sign.to_bits() & Self::splat(-0.).to_bits();
let magnitude = self.to_bits() & !Self::splat(-0.).to_bits();
Self::from_bits(sign_bit | magnitude)
}
#[inline]
fn simd_min(self, other: Self) -> Self {
unsafe { intrinsics::simd_fmin(self, other) }
}
#[inline]
fn simd_max(self, other: Self) -> Self {
unsafe { intrinsics::simd_fmax(self, other) }
}
#[inline]
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
"each lane in `min` must be less than or equal to the corresponding lane in `max`",
);
let mut x = self;
x = x.simd_lt(min).select(min, x);
x = x.simd_gt(max).select(max, x);
x
}
#[inline]
fn reduce_sum(self) -> Self::Scalar {
// LLVM sum is inaccurate on i586
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
self.as_array().iter().sum()
} else {
// Safety: `self` is a float vector
unsafe { intrinsics::simd_reduce_add_ordered(self, 0.) }
}
}
#[inline]
fn reduce_product(self) -> Self::Scalar {
// LLVM product is inaccurate on i586
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
self.as_array().iter().product()
} else {
// Safety: `self` is a float vector
unsafe { intrinsics::simd_reduce_mul_ordered(self, 1.) }
}
}
#[inline]
fn reduce_max(self) -> Self::Scalar {
// Safety: `self` is a float vector
unsafe { intrinsics::simd_reduce_max(self) }
}
#[inline]
fn reduce_min(self) -> Self::Scalar {
// Safety: `self` is a float vector
unsafe { intrinsics::simd_reduce_min(self) }
}
}
)*
}
}
impl_trait! { f32 { bits: u32, mask: i32 }, f64 { bits: u64, mask: i64 } }
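
For reference, a short sketch exercising the `SimdFloat` surface defined above (illustrative only; `f32x4`, `Mask::test`, and `Simd::to_array` are pre-existing `core::simd` items):

```
#![feature(portable_simd)]
use core::simd::{f32x4, SimdFloat};

fn main() {
    let v = f32x4::from_array([1.0, -2.0, f32::NAN, f32::INFINITY]);
    assert_eq!(v.abs().to_array()[1], 2.0);   // lanewise absolute value
    assert!(v.is_nan().test(2));              // Mask::test reads one lane
    assert!(v.is_infinite().test(3));
    assert_eq!(v.simd_min(f32x4::splat(0.0)).to_array()[1], -2.0);
}
```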

@@ -0,0 +1,280 @@
use super::sealed::Sealed;
use crate::simd::{
intrinsics, LaneCount, Mask, Simd, SimdElement, SimdPartialOrd, SupportedLaneCount,
};
/// Operations on SIMD vectors of signed integers.
pub trait SimdInt: Copy + Sealed {
/// Mask type used for manipulating this SIMD vector type.
type Mask;
/// Scalar type contained by this SIMD vector type.
type Scalar;
/// Lanewise saturating add.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdInt};
/// use core::i32::{MIN, MAX};
/// let x = Simd::from_array([MIN, 0, 1, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x + max;
/// let sat = x.saturating_add(max);
/// assert_eq!(unsat, Simd::from_array([-1, MAX, MIN, -2]));
/// assert_eq!(sat, Simd::from_array([-1, MAX, MAX, MAX]));
/// ```
fn saturating_add(self, second: Self) -> Self;
/// Lanewise saturating subtract.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdInt};
/// use core::i32::{MIN, MAX};
/// let x = Simd::from_array([MIN, -2, -1, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x - max;
/// let sat = x.saturating_sub(max);
/// assert_eq!(unsat, Simd::from_array([1, MAX, MIN, 0]));
/// assert_eq!(sat, Simd::from_array([MIN, MIN, MIN, 0]));
/// ```
fn saturating_sub(self, second: Self) -> Self;
/// Lanewise absolute value, implemented in Rust.
/// Every lane becomes its absolute value.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdInt};
/// use core::i32::{MIN, MAX};
/// let xs = Simd::from_array([MIN, MIN + 1, -5, 0]);
/// assert_eq!(xs.abs(), Simd::from_array([MIN, MAX, 5, 0]));
/// ```
fn abs(self) -> Self;
/// Lanewise saturating absolute value, implemented in Rust.
/// Like `abs()`, except the MIN value becomes MAX instead of itself.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdInt};
/// use core::i32::{MIN, MAX};
/// let xs = Simd::from_array([MIN, -2, 0, 3]);
/// let unsat = xs.abs();
/// let sat = xs.saturating_abs();
/// assert_eq!(unsat, Simd::from_array([MIN, 2, 0, 3]));
/// assert_eq!(sat, Simd::from_array([MAX, 2, 0, 3]));
/// ```
fn saturating_abs(self) -> Self;
/// Lanewise saturating negation, implemented in Rust.
/// Like `neg()`, except the MIN value becomes MAX instead of itself.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdInt};
/// use core::i32::{MIN, MAX};
/// let x = Simd::from_array([MIN, -2, 3, MAX]);
/// let unsat = -x;
/// let sat = x.saturating_neg();
/// assert_eq!(unsat, Simd::from_array([MIN, 2, -3, MIN + 1]));
/// assert_eq!(sat, Simd::from_array([MAX, 2, -3, MIN + 1]));
/// ```
fn saturating_neg(self) -> Self;
/// Returns true for each positive lane and false if it is zero or negative.
fn is_positive(self) -> Self::Mask;
/// Returns true for each negative lane and false if it is zero or positive.
fn is_negative(self) -> Self::Mask;
/// Returns numbers representing the sign of each lane.
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
fn signum(self) -> Self;
/// Returns the sum of the lanes of the vector, with wrapping addition.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{i32x4, SimdInt};
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_sum(), 10);
///
/// // SIMD integer addition is always wrapping
/// let v = i32x4::from_array([i32::MAX, 1, 0, 0]);
/// assert_eq!(v.reduce_sum(), i32::MIN);
/// ```
fn reduce_sum(self) -> Self::Scalar;
/// Returns the product of the lanes of the vector, with wrapping multiplication.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{i32x4, SimdInt};
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_product(), 24);
///
/// // SIMD integer multiplication is always wrapping
/// let v = i32x4::from_array([i32::MAX, 2, 1, 1]);
/// assert!(v.reduce_product() < i32::MAX);
/// ```
fn reduce_product(self) -> Self::Scalar;
/// Returns the maximum lane in the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{i32x4, SimdInt};
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_max(), 4);
/// ```
fn reduce_max(self) -> Self::Scalar;
/// Returns the minimum lane in the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{i32x4, SimdInt};
/// let v = i32x4::from_array([1, 2, 3, 4]);
/// assert_eq!(v.reduce_min(), 1);
/// ```
fn reduce_min(self) -> Self::Scalar;
/// Returns the cumulative bitwise "and" across the lanes of the vector.
fn reduce_and(self) -> Self::Scalar;
/// Returns the cumulative bitwise "or" across the lanes of the vector.
fn reduce_or(self) -> Self::Scalar;
/// Returns the cumulative bitwise "xor" across the lanes of the vector.
fn reduce_xor(self) -> Self::Scalar;
}
macro_rules! impl_trait {
{ $($ty:ty),* } => {
$(
impl<const LANES: usize> Sealed for Simd<$ty, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
}
impl<const LANES: usize> SimdInt for Simd<$ty, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
type Mask = Mask<<$ty as SimdElement>::Mask, LANES>;
type Scalar = $ty;
#[inline]
fn saturating_add(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { intrinsics::simd_saturating_add(self, second) }
}
#[inline]
fn saturating_sub(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { intrinsics::simd_saturating_sub(self, second) }
}
#[inline]
fn abs(self) -> Self {
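// arithmetic shift: m is all ones for negative lanes, all zeros otherwise;
// (self ^ m) - m is the branchless two's-complement absolute value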
const SHR: $ty = <$ty>::BITS as $ty - 1;
let m = self >> Simd::splat(SHR);
(self^m) - m
}
#[inline]
fn saturating_abs(self) -> Self {
// arith shift for -1 or 0 mask based on sign bit, giving 2s complement
const SHR: $ty = <$ty>::BITS as $ty - 1;
let m = self >> Simd::splat(SHR);
(self^m).saturating_sub(m)
}
#[inline]
fn saturating_neg(self) -> Self {
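// saturating 0 - self, so MIN (which has no positive counterpart) becomes MAX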
Self::splat(0).saturating_sub(self)
}
#[inline]
fn is_positive(self) -> Self::Mask {
self.simd_gt(Self::splat(0))
}
#[inline]
fn is_negative(self) -> Self::Mask {
self.simd_lt(Self::splat(0))
}
#[inline]
fn signum(self) -> Self {
self.is_positive().select(
Self::splat(1),
self.is_negative().select(Self::splat(-1), Self::splat(0))
)
}
#[inline]
fn reduce_sum(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_add_ordered(self, 0) }
}
#[inline]
fn reduce_product(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_mul_ordered(self, 1) }
}
#[inline]
fn reduce_max(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_max(self) }
}
#[inline]
fn reduce_min(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_min(self) }
}
#[inline]
fn reduce_and(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_and(self) }
}
#[inline]
fn reduce_or(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_or(self) }
}
#[inline]
fn reduce_xor(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_xor(self) }
}
}
)*
}
}
impl_trait! { i8, i16, i32, i64, isize }
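
A quick sketch of the `SimdInt` trait in use (illustrative; the values mirror the doc examples above):

```
#![feature(portable_simd)]
use core::simd::{i32x4, SimdInt};

fn main() {
    let v = i32x4::from_array([i32::MIN, -2, 0, 3]);
    assert_eq!(v.saturating_abs().to_array(), [i32::MAX, 2, 0, 3]); // MIN saturates to MAX
    assert_eq!(v.signum().to_array(), [-1, -1, 0, 1]);
    assert_eq!(v.reduce_max(), 3);
}
```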

@@ -0,0 +1,135 @@
use super::sealed::Sealed;
use crate::simd::{intrinsics, LaneCount, Simd, SupportedLaneCount};
/// Operations on SIMD vectors of unsigned integers.
pub trait SimdUint: Copy + Sealed {
/// Scalar type contained by this SIMD vector type.
type Scalar;
/// Lanewise saturating add.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdUint};
/// use core::u32::MAX;
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x + max;
/// let sat = x.saturating_add(max);
/// assert_eq!(unsat, Simd::from_array([1, 0, MAX, MAX - 1]));
/// assert_eq!(sat, max);
/// ```
fn saturating_add(self, second: Self) -> Self;
/// Lanewise saturating subtract.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::{Simd, SimdUint};
/// use core::u32::MAX;
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x - max;
/// let sat = x.saturating_sub(max);
/// assert_eq!(unsat, Simd::from_array([3, 2, 1, 0]));
/// assert_eq!(sat, Simd::splat(0));
/// ```
fn saturating_sub(self, second: Self) -> Self;
/// Returns the sum of the lanes of the vector, with wrapping addition.
fn reduce_sum(self) -> Self::Scalar;
/// Returns the product of the lanes of the vector, with wrapping multiplication.
fn reduce_product(self) -> Self::Scalar;
/// Returns the maximum lane in the vector.
fn reduce_max(self) -> Self::Scalar;
/// Returns the minimum lane in the vector.
fn reduce_min(self) -> Self::Scalar;
/// Returns the cumulative bitwise "and" across the lanes of the vector.
fn reduce_and(self) -> Self::Scalar;
/// Returns the cumulative bitwise "or" across the lanes of the vector.
fn reduce_or(self) -> Self::Scalar;
/// Returns the cumulative bitwise "xor" across the lanes of the vector.
fn reduce_xor(self) -> Self::Scalar;
}
macro_rules! impl_trait {
{ $($ty:ty),* } => {
$(
impl<const LANES: usize> Sealed for Simd<$ty, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
}
impl<const LANES: usize> SimdUint for Simd<$ty, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
type Scalar = $ty;
#[inline]
fn saturating_add(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { intrinsics::simd_saturating_add(self, second) }
}
#[inline]
fn saturating_sub(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { intrinsics::simd_saturating_sub(self, second) }
}
#[inline]
fn reduce_sum(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_add_ordered(self, 0) }
}
#[inline]
fn reduce_product(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_mul_ordered(self, 1) }
}
#[inline]
fn reduce_max(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_max(self) }
}
#[inline]
fn reduce_min(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_min(self) }
}
#[inline]
fn reduce_and(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_and(self) }
}
#[inline]
fn reduce_or(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_or(self) }
}
#[inline]
fn reduce_xor(self) -> Self::Scalar {
// Safety: `self` is an integer vector
unsafe { intrinsics::simd_reduce_xor(self) }
}
}
)*
}
}
impl_trait! { u8, u16, u32, u64, usize }
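
And the unsigned counterpart, where the bitwise reductions are the most interesting additions (illustrative sketch):

```
#![feature(portable_simd)]
use core::simd::{u32x4, SimdUint};

fn main() {
    let v = u32x4::from_array([0b0011, 0b0110, 0b1100, 0b1111]);
    assert_eq!(v.reduce_and(), 0b0000);
    assert_eq!(v.reduce_or(), 0b1111);
    assert_eq!(v.reduce_xor(), 0b0110); // 3 ^ 6 ^ 12 ^ 15
    assert_eq!(v.saturating_add(u32x4::splat(u32::MAX)), u32x4::splat(u32::MAX));
}
```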

@@ -1,156 +0,0 @@
use crate::simd::intrinsics::{simd_saturating_add, simd_saturating_sub};
use crate::simd::{LaneCount, Simd, SupportedLaneCount};
macro_rules! impl_uint_arith {
($($ty:ty),+) => {
$( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
/// Lanewise saturating add.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::MAX;")]
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x + max;
/// let sat = x.saturating_add(max);
/// assert_eq!(unsat, Simd::from_array([1, 0, MAX, MAX - 1]));
/// assert_eq!(sat, max);
/// ```
#[inline]
pub fn saturating_add(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { simd_saturating_add(self, second) }
}
/// Lanewise saturating subtract.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::MAX;")]
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x - max;
/// let sat = x.saturating_sub(max);
/// assert_eq!(unsat, Simd::from_array([3, 2, 1, 0]));
/// assert_eq!(sat, Simd::splat(0));
/// ```
#[inline]
pub fn saturating_sub(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { simd_saturating_sub(self, second) }
}
})+
}
}
macro_rules! impl_int_arith {
($($ty:ty),+) => {
$( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
/// Lanewise saturating add.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let x = Simd::from_array([MIN, 0, 1, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x + max;
/// let sat = x.saturating_add(max);
/// assert_eq!(unsat, Simd::from_array([-1, MAX, MIN, -2]));
/// assert_eq!(sat, Simd::from_array([-1, MAX, MAX, MAX]));
/// ```
#[inline]
pub fn saturating_add(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { simd_saturating_add(self, second) }
}
/// Lanewise saturating subtract.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let x = Simd::from_array([MIN, -2, -1, MAX]);
/// let max = Simd::splat(MAX);
/// let unsat = x - max;
/// let sat = x.saturating_sub(max);
/// assert_eq!(unsat, Simd::from_array([1, MAX, MIN, 0]));
/// assert_eq!(sat, Simd::from_array([MIN, MIN, MIN, 0]));
/// ```
#[inline]
pub fn saturating_sub(self, second: Self) -> Self {
// Safety: `self` is a vector
unsafe { simd_saturating_sub(self, second) }
}
/// Lanewise absolute value, implemented in Rust.
/// Every lane becomes its absolute value.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let xs = Simd::from_array([MIN, MIN +1, -5, 0]);
/// assert_eq!(xs.abs(), Simd::from_array([MIN, MAX, 5, 0]));
/// ```
#[inline]
pub fn abs(self) -> Self {
const SHR: $ty = <$ty>::BITS as $ty - 1;
let m = self >> Simd::splat(SHR);
(self^m) - m
}
/// Lanewise saturating absolute value, implemented in Rust.
/// As abs(), except the MIN value becomes MAX instead of itself.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let xs = Simd::from_array([MIN, -2, 0, 3]);
/// let unsat = xs.abs();
/// let sat = xs.saturating_abs();
/// assert_eq!(unsat, Simd::from_array([MIN, 2, 0, 3]));
/// assert_eq!(sat, Simd::from_array([MAX, 2, 0, 3]));
/// ```
#[inline]
pub fn saturating_abs(self) -> Self {
// arith shift for -1 or 0 mask based on sign bit, giving 2s complement
const SHR: $ty = <$ty>::BITS as $ty - 1;
let m = self >> Simd::splat(SHR);
(self^m).saturating_sub(m)
}
/// Lanewise saturating negation, implemented in Rust.
/// As neg(), except the MIN value becomes MAX instead of itself.
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let x = Simd::from_array([MIN, -2, 3, MAX]);
/// let unsat = -x;
/// let sat = x.saturating_neg();
/// assert_eq!(unsat, Simd::from_array([MIN, 2, -3, MIN + 1]));
/// assert_eq!(sat, Simd::from_array([MAX, 2, -3, MIN + 1]));
/// ```
#[inline]
pub fn saturating_neg(self) -> Self {
Self::splat(0).saturating_sub(self)
}
})+
}
}
impl_uint_arith! { u8, u16, u32, u64, usize }
impl_int_arith! { i8, i16, i32, i64, isize }

@@ -1,6 +1,3 @@
#[macro_use]
mod reduction;
#[macro_use]
mod swizzle;
@@ -9,12 +6,12 @@ pub(crate) mod intrinsics;
#[cfg(feature = "generic_const_exprs")]
mod to_bytes;
mod elements;
mod eq;
mod fmt;
mod iter;
mod lane_count;
mod masks;
mod math;
mod ops;
mod ord;
mod round;
@@ -26,6 +23,7 @@ mod vendor;
pub mod simd {
pub(crate) use crate::core_simd::intrinsics;
pub use crate::core_simd::elements::*;
pub use crate::core_simd::eq::*;
pub use crate::core_simd::lane_count::{LaneCount, SupportedLaneCount};
pub use crate::core_simd::masks::*;

@@ -1,267 +0,0 @@
use crate::simd::intrinsics::{
simd_reduce_add_ordered, simd_reduce_and, simd_reduce_max, simd_reduce_min,
simd_reduce_mul_ordered, simd_reduce_or, simd_reduce_xor,
};
use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::ops::{BitAnd, BitOr, BitXor};
macro_rules! impl_integer_reductions {
{ $scalar:ty } => {
impl<const LANES: usize> Simd<$scalar, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
/// Reducing wrapping add. Returns the sum of the lanes of the vector, with wrapping addition.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x4;")]
#[doc = concat!("let v = ", stringify!($scalar), "x4::from_array([1, 2, 3, 4]);")]
/// assert_eq!(v.reduce_sum(), 10);
///
/// // SIMD integer addition is always wrapping
#[doc = concat!("let v = ", stringify!($scalar), "x4::from_array([", stringify!($scalar) ,"::MAX, 1, 0, 0]);")]
#[doc = concat!("assert_eq!(v.reduce_sum(), ", stringify!($scalar), "::MIN);")]
/// ```
#[inline]
pub fn reduce_sum(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_add_ordered(self, 0) }
}
/// Reducing wrapping multiply. Returns the product of the lanes of the vector, with wrapping multiplication.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x4;")]
#[doc = concat!("let v = ", stringify!($scalar), "x4::from_array([1, 2, 3, 4]);")]
/// assert_eq!(v.reduce_product(), 24);
///
/// // SIMD integer multiplication is always wrapping
#[doc = concat!("let v = ", stringify!($scalar), "x4::from_array([", stringify!($scalar) ,"::MAX, 2, 1, 1]);")]
#[doc = concat!("assert!(v.reduce_product() < ", stringify!($scalar), "::MAX);")]
/// ```
#[inline]
pub fn reduce_product(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_mul_ordered(self, 1) }
}
/// Reducing maximum. Returns the maximum lane in the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x4;")]
#[doc = concat!("let v = ", stringify!($scalar), "x4::from_array([1, 2, 3, 4]);")]
/// assert_eq!(v.reduce_max(), 4);
/// ```
#[inline]
pub fn reduce_max(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_max(self) }
}
/// Reducing minimum. Returns the minimum lane in the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x4;")]
#[doc = concat!("let v = ", stringify!($scalar), "x4::from_array([1, 2, 3, 4]);")]
/// assert_eq!(v.reduce_min(), 1);
/// ```
#[inline]
pub fn reduce_min(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_min(self) }
}
}
}
}
impl_integer_reductions! { i8 }
impl_integer_reductions! { i16 }
impl_integer_reductions! { i32 }
impl_integer_reductions! { i64 }
impl_integer_reductions! { isize }
impl_integer_reductions! { u8 }
impl_integer_reductions! { u16 }
impl_integer_reductions! { u32 }
impl_integer_reductions! { u64 }
impl_integer_reductions! { usize }
macro_rules! impl_float_reductions {
{ $scalar:ty } => {
impl<const LANES: usize> Simd<$scalar, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
/// Reducing add. Returns the sum of the lanes of the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x2;")]
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([1., 2.]);")]
/// assert_eq!(v.reduce_sum(), 3.);
/// ```
#[inline]
pub fn reduce_sum(self) -> $scalar {
// LLVM sum is inaccurate on i586
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
self.as_array().iter().sum()
} else {
// Safety: `self` is a float vector
unsafe { simd_reduce_add_ordered(self, 0.) }
}
}
/// Reducing multiply. Returns the product of the lanes of the vector.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x2;")]
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([3., 4.]);")]
/// assert_eq!(v.reduce_product(), 12.);
/// ```
#[inline]
pub fn reduce_product(self) -> $scalar {
// LLVM product is inaccurate on i586
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
self.as_array().iter().product()
} else {
// Safety: `self` is a float vector
unsafe { simd_reduce_mul_ordered(self, 1.) }
}
}
/// Reducing maximum. Returns the maximum lane in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either.
///
/// This function will not return `NaN` unless all lanes are `NaN`.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x2;")]
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([1., 2.]);")]
/// assert_eq!(v.reduce_max(), 2.);
///
/// // NaN values are skipped...
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([1., ", stringify!($scalar), "::NAN]);")]
/// assert_eq!(v.reduce_max(), 1.);
///
/// // ...unless all values are NaN
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([",
stringify!($scalar), "::NAN, ",
stringify!($scalar), "::NAN]);"
)]
/// assert!(v.reduce_max().is_nan());
/// ```
#[inline]
pub fn reduce_max(self) -> $scalar {
// Safety: `self` is a float vector
unsafe { simd_reduce_max(self) }
}
/// Reducing minimum. Returns the minimum lane in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either.
///
/// This function will not return `NaN` unless all lanes are `NaN`.
///
/// # Examples
///
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
#[doc = concat!("# use core::simd::", stringify!($scalar), "x2;")]
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([3., 7.]);")]
/// assert_eq!(v.reduce_min(), 3.);
///
/// // NaN values are skipped...
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([1., ", stringify!($scalar), "::NAN]);")]
/// assert_eq!(v.reduce_min(), 1.);
///
/// // ...unless all values are NaN
#[doc = concat!("let v = ", stringify!($scalar), "x2::from_array([",
stringify!($scalar), "::NAN, ",
stringify!($scalar), "::NAN]);"
)]
/// assert!(v.reduce_min().is_nan());
/// ```
#[inline]
pub fn reduce_min(self) -> $scalar {
// Safety: `self` is a float vector
unsafe { simd_reduce_min(self) }
}
}
}
}
impl_float_reductions! { f32 }
impl_float_reductions! { f64 }
impl<T, const LANES: usize> Simd<T, LANES>
where
Self: BitAnd<Self, Output = Self>,
T: SimdElement + BitAnd<T, Output = T>,
LaneCount<LANES>: SupportedLaneCount,
{
/// Reducing bitwise "and". Returns the cumulative bitwise "and" across the lanes of
/// the vector.
#[inline]
pub fn reduce_and(self) -> T {
unsafe { simd_reduce_and(self) }
}
}
impl<T, const LANES: usize> Simd<T, LANES>
where
Self: BitOr<Self, Output = Self>,
T: SimdElement + BitOr<T, Output = T>,
LaneCount<LANES>: SupportedLaneCount,
{
/// Reducing bitwise "or". Returns the cumulative bitwise "or" across the lanes of
/// the vector.
#[inline]
pub fn reduce_or(self) -> T {
unsafe { simd_reduce_or(self) }
}
}
impl<T, const LANES: usize> Simd<T, LANES>
where
Self: BitXor<Self, Output = Self>,
T: SimdElement + BitXor<T, Output = T>,
LaneCount<LANES>: SupportedLaneCount,
{
/// Reducing bitwise "xor". Returns the cumulative bitwise "xor" across the lanes of
/// the vector.
#[inline]
pub fn reduce_xor(self) -> T {
unsafe { simd_reduce_xor(self) }
}
}

@@ -1,145 +1,6 @@
#![allow(non_camel_case_types)]
use crate::simd::intrinsics;
use crate::simd::{LaneCount, Mask, Simd, SimdPartialEq, SimdPartialOrd, SupportedLaneCount};
/// Implements inherent methods for a float vector containing multiple
/// `$lanes` of float `$type`, which uses `$bits_ty` as its binary
/// representation.
macro_rules! impl_float_vector {
{ $type:ty, $bits_ty:ty, $mask_ty:ty } => {
impl<const LANES: usize> Simd<$type, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
/// Raw transmutation to an unsigned integer vector type with the
/// same size and number of lanes.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn to_bits(self) -> Simd<$bits_ty, LANES> {
assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Simd<$bits_ty, LANES>>());
unsafe { core::mem::transmute_copy(&self) }
}
/// Raw transmutation from an unsigned integer vector type with the
/// same size and number of lanes.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn from_bits(bits: Simd<$bits_ty, LANES>) -> Self {
assert_eq!(core::mem::size_of::<Self>(), core::mem::size_of::<Simd<$bits_ty, LANES>>());
unsafe { core::mem::transmute_copy(&bits) }
}
/// Produces a vector where every lane has the absolute value of the
/// equivalently-indexed lane in `self`.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn abs(self) -> Self {
unsafe { intrinsics::simd_fabs(self) }
}
/// Takes the reciprocal (inverse) of each lane, `1/x`.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn recip(self) -> Self {
Self::splat(1.0) / self
}
/// Converts each lane from radians to degrees.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn to_degrees(self) -> Self {
// to_degrees uses a special constant for better precision, so extract that constant
self * Self::splat(<$type>::to_degrees(1.))
}
/// Converts each lane from degrees to radians.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn to_radians(self) -> Self {
self * Self::splat(<$type>::to_radians(1.))
}
/// Returns true for each lane if it has a positive sign, including
/// `+0.0`, `NaN`s with positive sign bit and positive infinity.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_sign_positive(self) -> Mask<$mask_ty, LANES> {
!self.is_sign_negative()
}
/// Returns true for each lane if it has a negative sign, including
/// `-0.0`, `NaN`s with negative sign bit and negative infinity.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_sign_negative(self) -> Mask<$mask_ty, LANES> {
let sign_bits = self.to_bits() & Simd::splat((!0 >> 1) + 1);
sign_bits.simd_gt(Simd::splat(0))
}
/// Returns true for each lane if its value is `NaN`.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_nan(self) -> Mask<$mask_ty, LANES> {
self.simd_ne(self)
}
/// Returns true for each lane if its value is positive infinity or negative infinity.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_infinite(self) -> Mask<$mask_ty, LANES> {
self.abs().simd_eq(Self::splat(<$type>::INFINITY))
}
/// Returns true for each lane if its value is neither infinite nor `NaN`.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_finite(self) -> Mask<$mask_ty, LANES> {
self.abs().simd_lt(Self::splat(<$type>::INFINITY))
}
/// Returns true for each lane if its value is subnormal.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_subnormal(self) -> Mask<$mask_ty, LANES> {
self.abs().simd_ne(Self::splat(0.0)) & (self.to_bits() & Self::splat(<$type>::INFINITY).to_bits()).simd_eq(Simd::splat(0))
}
/// Returns true for each lane if its value is neither zero, infinite,
/// subnormal, nor `NaN`.
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn is_normal(self) -> Mask<$mask_ty, LANES> {
!(self.abs().simd_eq(Self::splat(0.0)) | self.is_nan() | self.is_subnormal() | self.is_infinite())
}
/// Replaces each lane with a number that represents its sign.
///
/// * `1.0` if the number is positive, `+0.0`, or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0`, or `NEG_INFINITY`
/// * `NAN` if the number is `NAN`
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn signum(self) -> Self {
self.is_nan().select(Self::splat(<$type>::NAN), Self::splat(1.0).copysign(self))
}
/// Returns each lane with the magnitude of `self` and the sign of `sign`.
///
/// If any lane is a `NAN`, then a `NAN` with the sign of `sign` is returned.
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn copysign(self, sign: Self) -> Self {
let sign_bit = sign.to_bits() & Self::splat(-0.).to_bits();
let magnitude = self.to_bits() & !Self::splat(-0.).to_bits();
Self::from_bits(sign_bit | magnitude)
}
}
};
}
impl_float_vector! { f32, u32, i32 }
impl_float_vector! { f64, u64, i64 }
use crate::simd::Simd;
/// A 64-bit SIMD vector with two elements of type `f32`.
pub type f32x2 = Simd<f32, 2>;
@@ -161,73 +22,3 @@ pub type f64x8 = Simd<f64, 8>;
/// A 512-bit SIMD vector with eight elements of type `f64`.
pub type f64x8 = Simd<f64, 8>;
mod sealed {
pub trait Sealed {}
}
use sealed::Sealed;
/// SIMD operations on vectors of floating point numbers.
pub trait SimdFloat: Sized + Sealed {
/// Returns the minimum of each lane.
///
/// If one of the values is `NAN`, then the other value is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_min(self, other: Self) -> Self;
/// Returns the maximum of each lane.
///
/// If one of the values is `NAN`, then the other value is returned.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_max(self, other: Self) -> Self;
/// Restrict each lane to a certain interval unless it is NaN.
///
/// For each lane in `self`, returns the corresponding lane in `max` if the lane is
/// greater than `max`, and the corresponding lane in `min` if the lane is less
/// than `min`. Otherwise returns the lane in `self`.
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_clamp(self, min: Self, max: Self) -> Self;
}
macro_rules! impl_simd_float {
{ $($float:ty),* } => {
$(
impl <const LANES: usize> Sealed for Simd<$float, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
}
impl <const LANES: usize> SimdFloat for Simd<$float, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
fn simd_min(self, other: Self) -> Self {
unsafe { intrinsics::simd_fmin(self, other) }
}
#[inline]
fn simd_max(self, other: Self) -> Self {
unsafe { intrinsics::simd_fmax(self, other) }
}
#[inline]
fn simd_clamp(self, min: Self, max: Self) -> Self {
assert!(
min.simd_le(max).all(),
"each lane in `min` must be less than or equal to the corresponding lane in `max`",
);
let mut x = self;
x = x.simd_lt(min).select(min, x);
x = x.simd_gt(max).select(max, x);
x
}
}
)*
}
}
impl_simd_float! { f32, f64 }

@@ -1,46 +1,6 @@
#![allow(non_camel_case_types)]
use crate::simd::{LaneCount, Mask, Simd, SimdPartialOrd, SupportedLaneCount};
/// Implements additional integer traits (Eq, Ord, Hash) on the specified vector `$name`, holding multiple `$lanes` of `$type`.
macro_rules! impl_integer_vector {
{ $type:ty } => {
impl<const LANES: usize> Simd<$type, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
/// Returns true for each positive lane and false if it is zero or negative.
#[inline]
pub fn is_positive(self) -> Mask<$type, LANES> {
self.simd_gt(Self::splat(0))
}
/// Returns true for each negative lane and false if it is zero or positive.
#[inline]
pub fn is_negative(self) -> Mask<$type, LANES> {
self.simd_lt(Self::splat(0))
}
/// Returns numbers representing the sign of each lane.
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
#[inline]
pub fn signum(self) -> Self {
self.is_positive().select(
Self::splat(1),
self.is_negative().select(Self::splat(-1), Self::splat(0))
)
}
}
}
}
impl_integer_vector! { isize }
impl_integer_vector! { i16 }
impl_integer_vector! { i32 }
impl_integer_vector! { i64 }
impl_integer_vector! { i8 }
use crate::simd::Simd;
/// A SIMD vector with two elements of type `isize`.
pub type isizex2 = Simd<isize, 2>;

@@ -172,6 +172,7 @@ macro_rules! impl_common_integer_tests {
macro_rules! impl_signed_tests {
{ $scalar:tt } => {
mod $scalar {
use core_simd::simd::SimdInt;
type Vector<const LANES: usize> = core_simd::Simd<Scalar, LANES>;
type Scalar = $scalar;
@@ -312,6 +313,7 @@ macro_rules! impl_signed_tests {
macro_rules! impl_unsigned_tests {
{ $scalar:tt } => {
mod $scalar {
use core_simd::simd::SimdUint;
type Vector<const LANES: usize> = core_simd::Simd<Scalar, LANES>;
type Scalar = $scalar;
@@ -346,6 +348,7 @@ macro_rules! impl_unsigned_tests {
macro_rules! impl_float_tests {
{ $scalar:tt, $int_scalar:tt } => {
mod $scalar {
use core_simd::SimdFloat;
type Vector<const LANES: usize> = core_simd::Simd<Scalar, LANES>;
type Scalar = $scalar;
@@ -462,7 +465,6 @@ macro_rules! impl_float_tests {
}
fn simd_min<const LANES: usize>() {
use core_simd::simd::SimdFloat;
// Regular conditions (both values aren't zero)
test_helpers::test_binary_elementwise(
&Vector::<LANES>::simd_min,
@@ -486,7 +488,6 @@ macro_rules! impl_float_tests {
}
fn simd_max<const LANES: usize>() {
use core_simd::simd::SimdFloat;
// Regular conditions (both values aren't zero)
test_helpers::test_binary_elementwise(
&Vector::<LANES>::simd_max,
@@ -510,7 +511,6 @@ macro_rules! impl_float_tests {
}
fn simd_clamp<const LANES: usize>() {
use core_simd::simd::SimdFloat;
test_helpers::test_3(&|value: [Scalar; LANES], mut min: [Scalar; LANES], mut max: [Scalar; LANES]| {
for (min, max) in min.iter_mut().zip(max.iter_mut()) {
if max < min {