Merge pull request #80 from rust-lang/feature/comparisons

Add classification functions
This commit is contained in:
Jubilee 2021-04-09 07:39:47 -07:00 committed by GitHub
commit 0682c31fce
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 537 additions and 299 deletions

View File

@ -0,0 +1,86 @@
use crate::LanesAtMost32;
/// Emits the lane-wise comparison methods (`lanes_eq`, `lanes_ne`, `lanes_lt`,
/// `lanes_gt`, `lanes_le`, `lanes_ge`) for each listed SIMD vector type.
///
/// Each entry maps a vector type to its opaque mask type, along with the
/// full-width mask representation and the signed integer vector type that the
/// comparison intrinsics produce.
macro_rules! implement_mask_ops {
    { $($vec:ident => $mask_ty:ident ($raw_mask:ident, $int_vec:ident),)* } => {
        $(
            impl<const LANES: usize> crate::$vec<LANES>
            where
                crate::$vec<LANES>: LanesAtMost32,
                crate::$int_vec<LANES>: LanesAtMost32,
            {
                /// Test if each lane is equal to the corresponding lane in `other`.
                #[inline]
                pub fn lanes_eq(self, other: Self) -> crate::$mask_ty<LANES> {
                    // SAFETY: the simd comparison intrinsics yield only 0 or -1 per lane,
                    // which is exactly the invariant `from_int_unchecked` requires.
                    unsafe {
                        crate::$raw_mask::from_int_unchecked(crate::intrinsics::simd_eq(self, other)).into()
                    }
                }

                /// Test if each lane is not equal to the corresponding lane in `other`.
                #[inline]
                pub fn lanes_ne(self, other: Self) -> crate::$mask_ty<LANES> {
                    // SAFETY: lanes are all-zeros or all-ones (see `lanes_eq`).
                    unsafe {
                        crate::$raw_mask::from_int_unchecked(crate::intrinsics::simd_ne(self, other)).into()
                    }
                }

                /// Test if each lane is less than the corresponding lane in `other`.
                #[inline]
                pub fn lanes_lt(self, other: Self) -> crate::$mask_ty<LANES> {
                    // SAFETY: lanes are all-zeros or all-ones (see `lanes_eq`).
                    unsafe {
                        crate::$raw_mask::from_int_unchecked(crate::intrinsics::simd_lt(self, other)).into()
                    }
                }

                /// Test if each lane is greater than the corresponding lane in `other`.
                #[inline]
                pub fn lanes_gt(self, other: Self) -> crate::$mask_ty<LANES> {
                    // SAFETY: lanes are all-zeros or all-ones (see `lanes_eq`).
                    unsafe {
                        crate::$raw_mask::from_int_unchecked(crate::intrinsics::simd_gt(self, other)).into()
                    }
                }

                /// Test if each lane is less than or equal to the corresponding lane in `other`.
                #[inline]
                pub fn lanes_le(self, other: Self) -> crate::$mask_ty<LANES> {
                    // SAFETY: lanes are all-zeros or all-ones (see `lanes_eq`).
                    unsafe {
                        crate::$raw_mask::from_int_unchecked(crate::intrinsics::simd_le(self, other)).into()
                    }
                }

                /// Test if each lane is greater than or equal to the corresponding lane in `other`.
                #[inline]
                pub fn lanes_ge(self, other: Self) -> crate::$mask_ty<LANES> {
                    // SAFETY: lanes are all-zeros or all-ones (see `lanes_eq`).
                    unsafe {
                        crate::$raw_mask::from_int_unchecked(crate::intrinsics::simd_ge(self, other)).into()
                    }
                }
            }
        )*
    }
}
// Instantiate the lane-wise comparison methods for every vector type.
// Entry shape: vector => opaque mask type (full mask repr, signed bits vector).
implement_mask_ops! {
// Signed integer vectors: mask width matches the lane width.
SimdI8 => Mask8 (SimdMask8, SimdI8),
SimdI16 => Mask16 (SimdMask16, SimdI16),
SimdI32 => Mask32 (SimdMask32, SimdI32),
SimdI64 => Mask64 (SimdMask64, SimdI64),
SimdI128 => Mask128 (SimdMask128, SimdI128),
SimdIsize => MaskSize (SimdMaskSize, SimdIsize),
// Unsigned vectors reuse the mask/bits types of the same-width signed vectors,
// since the comparison intrinsics always produce signed integer vectors.
SimdU8 => Mask8 (SimdMask8, SimdI8),
SimdU16 => Mask16 (SimdMask16, SimdI16),
SimdU32 => Mask32 (SimdMask32, SimdI32),
SimdU64 => Mask64 (SimdMask64, SimdI64),
SimdU128 => Mask128 (SimdMask128, SimdI128),
SimdUsize => MaskSize (SimdMaskSize, SimdIsize),
// Float vectors map to the mask sized for their element width (32/64 bits).
SimdF32 => Mask32 (SimdMask32, SimdI32),
SimdF64 => Mask64 (SimdMask64, SimdI64),
}

View File

@ -1,7 +1,7 @@
/// Implements common traits on the specified vector `$name`, holding multiple `$lanes` of `$type`.
macro_rules! impl_vector {
{ $name:ident, $type:ty } => {
impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
/// Construct a SIMD vector by setting all lanes to the given value.
pub const fn splat(value: $type) -> Self {
Self([value; LANES])
@ -44,23 +44,23 @@ macro_rules! impl_vector {
}
}
impl<const LANES: usize> Copy for $name<LANES> where Self: crate::LanesAtMost64 {}
impl<const LANES: usize> Copy for $name<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> Clone for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> Clone for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const LANES: usize> Default for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> Default for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn default() -> Self {
Self::splat(<$type>::default())
}
}
impl<const LANES: usize> PartialEq for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> PartialEq for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn eq(&self, other: &Self) -> bool {
// TODO use SIMD equality
@ -68,7 +68,7 @@ macro_rules! impl_vector {
}
}
impl<const LANES: usize> PartialOrd for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> PartialOrd for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
// TODO use SIMD equality
@ -77,14 +77,14 @@ macro_rules! impl_vector {
}
// array references
impl<const LANES: usize> AsRef<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> AsRef<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn as_ref(&self) -> &[$type; LANES] {
&self.0
}
}
impl<const LANES: usize> AsMut<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> AsMut<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn as_mut(&mut self) -> &mut [$type; LANES] {
&mut self.0
@ -92,14 +92,14 @@ macro_rules! impl_vector {
}
// slice references
impl<const LANES: usize> AsRef<[$type]> for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> AsRef<[$type]> for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn as_ref(&self) -> &[$type] {
&self.0
}
}
impl<const LANES: usize> AsMut<[$type]> for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> AsMut<[$type]> for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn as_mut(&mut self) -> &mut [$type] {
&mut self.0
@ -107,13 +107,13 @@ macro_rules! impl_vector {
}
// vector/array conversion
impl<const LANES: usize> From<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> From<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost32 {
fn from(array: [$type; LANES]) -> Self {
Self(array)
}
}
impl <const LANES: usize> From<$name<LANES>> for [$type; LANES] where $name<LANES>: crate::LanesAtMost64 {
impl <const LANES: usize> From<$name<LANES>> for [$type; LANES] where $name<LANES>: crate::LanesAtMost32 {
fn from(vector: $name<LANES>) -> Self {
vector.to_array()
}

View File

@ -35,7 +35,7 @@ macro_rules! impl_fmt_trait {
$( // repeat trait
impl<const LANES: usize> core::fmt::$trait for crate::$type<LANES>
where
Self: crate::LanesAtMost64,
Self: crate::LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
$format(self.as_ref(), f)

View File

@ -61,7 +61,6 @@ extern "platform-intrinsic" {
pub(crate) fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
pub(crate) fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U;
pub(crate) fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U;
pub(crate) fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U;
// {s,u}add.sat
pub(crate) fn simd_saturating_add<T>(x: T, y: T) -> T;

View File

@ -1,15 +1,14 @@
/// Implemented for bitmask sizes that are supported by the implementation.
pub trait LanesAtMost64 {}
pub trait LanesAtMost32 {}
macro_rules! impl_for {
{ $name:ident } => {
impl LanesAtMost64 for $name<1> {}
impl LanesAtMost64 for $name<2> {}
impl LanesAtMost64 for $name<4> {}
impl LanesAtMost64 for $name<8> {}
impl LanesAtMost64 for $name<16> {}
impl LanesAtMost64 for $name<32> {}
impl LanesAtMost64 for $name<64> {}
impl LanesAtMost32 for $name<1> {}
impl LanesAtMost32 for $name<2> {}
impl LanesAtMost32 for $name<4> {}
impl LanesAtMost32 for $name<8> {}
impl LanesAtMost32 for $name<16> {}
impl LanesAtMost32 for $name<32> {}
}
}

View File

@ -12,6 +12,7 @@ mod permute;
#[macro_use]
mod transmute;
mod comparisons;
mod fmt;
mod intrinsics;
mod ops;
@ -20,7 +21,7 @@ mod round;
mod math;
mod lanes_at_most_64;
pub use lanes_at_most_64::LanesAtMost64;
pub use lanes_at_most_64::LanesAtMost32;
mod masks;
pub use masks::*;

View File

@ -1,15 +1,15 @@
use crate::LanesAtMost64;
use crate::LanesAtMost32;
/// A mask where each lane is represented by a single bit.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct BitMask<const LANES: usize>(u64)
where
BitMask<LANES>: LanesAtMost64;
BitMask<LANES>: LanesAtMost32;
impl<const LANES: usize> BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
@ -43,7 +43,7 @@ where
impl<const LANES: usize> core::ops::BitAnd for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -54,7 +54,7 @@ where
impl<const LANES: usize> core::ops::BitAnd<bool> for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -65,7 +65,7 @@ where
impl<const LANES: usize> core::ops::BitAnd<BitMask<LANES>> for bool
where
BitMask<LANES>: LanesAtMost64,
BitMask<LANES>: LanesAtMost32,
{
type Output = BitMask<LANES>;
#[inline]
@ -76,7 +76,7 @@ where
impl<const LANES: usize> core::ops::BitOr for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -87,7 +87,7 @@ where
impl<const LANES: usize> core::ops::BitOr<bool> for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -98,7 +98,7 @@ where
impl<const LANES: usize> core::ops::BitOr<BitMask<LANES>> for bool
where
BitMask<LANES>: LanesAtMost64,
BitMask<LANES>: LanesAtMost32,
{
type Output = BitMask<LANES>;
#[inline]
@ -109,7 +109,7 @@ where
impl<const LANES: usize> core::ops::BitXor for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -120,7 +120,7 @@ where
impl<const LANES: usize> core::ops::BitXor<bool> for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -131,7 +131,7 @@ where
impl<const LANES: usize> core::ops::BitXor<BitMask<LANES>> for bool
where
BitMask<LANES>: LanesAtMost64,
BitMask<LANES>: LanesAtMost32,
{
type Output = BitMask<LANES>;
#[inline]
@ -142,7 +142,7 @@ where
impl<const LANES: usize> core::ops::Not for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
type Output = BitMask<LANES>;
#[inline]
@ -153,7 +153,7 @@ where
impl<const LANES: usize> core::ops::BitAndAssign for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
@ -163,7 +163,7 @@ where
impl<const LANES: usize> core::ops::BitAndAssign<bool> for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
@ -173,7 +173,7 @@ where
impl<const LANES: usize> core::ops::BitOrAssign for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
@ -183,7 +183,7 @@ where
impl<const LANES: usize> core::ops::BitOrAssign<bool> for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
@ -193,7 +193,7 @@ where
impl<const LANES: usize> core::ops::BitXorAssign for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
@ -203,7 +203,7 @@ where
impl<const LANES: usize> core::ops::BitXorAssign<bool> for BitMask<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {

View File

@ -20,16 +20,16 @@ macro_rules! define_mask {
#[repr(transparent)]
pub struct $name<const $lanes: usize>($type)
where
$type: crate::LanesAtMost64;
$type: crate::LanesAtMost32;
impl<const LANES: usize> Copy for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{}
impl<const LANES: usize> Clone for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn clone(&self) -> Self {
@ -39,7 +39,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
@ -75,11 +75,30 @@ macro_rules! define_mask {
0
}
}
/// Creates a mask from an integer vector.
///
/// # Safety
/// All lanes must be either 0 or -1.
#[inline]
pub unsafe fn from_int_unchecked(value: $type) -> Self {
Self(value)
}
/// Creates a mask from an integer vector.
///
/// # Panics
/// Panics if any lane is not 0 or -1.
#[inline]
pub fn from_int(value: $type) -> Self {
use core::convert::TryInto;
value.try_into().unwrap()
}
}
impl<const $lanes: usize> core::convert::From<bool> for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn from(value: bool) -> Self {
Self::splat(value)
@ -88,7 +107,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::convert::TryFrom<$type> for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Error = TryFromMaskError;
fn try_from(value: $type) -> Result<Self, Self::Error> {
@ -102,7 +121,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::convert::From<$name<$lanes>> for $type
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn from(value: $name<$lanes>) -> Self {
value.0
@ -111,8 +130,8 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::convert::From<crate::BitMask<$lanes>> for $name<$lanes>
where
$type: crate::LanesAtMost64,
crate::BitMask<$lanes>: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
crate::BitMask<$lanes>: crate::LanesAtMost32,
{
fn from(value: crate::BitMask<$lanes>) -> Self {
// TODO use an intrinsic to do this efficiently (with LLVM's sext instruction)
@ -126,8 +145,8 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::convert::From<$name<$lanes>> for crate::BitMask<$lanes>
where
$type: crate::LanesAtMost64,
crate::BitMask<$lanes>: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
crate::BitMask<$lanes>: crate::LanesAtMost32,
{
fn from(value: $name<$lanes>) -> Self {
// TODO use an intrinsic to do this efficiently (with LLVM's trunc instruction)
@ -141,7 +160,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::fmt::Debug for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_list()
@ -152,7 +171,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::fmt::Binary for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::Binary::fmt(&self.0, f)
@ -161,7 +180,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::fmt::Octal for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::Octal::fmt(&self.0, f)
@ -170,7 +189,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::fmt::LowerHex for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::LowerHex::fmt(&self.0, f)
@ -179,7 +198,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::fmt::UpperHex for $name<$lanes>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::UpperHex::fmt(&self.0, f)
@ -188,7 +207,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = Self;
#[inline]
@ -199,7 +218,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = Self;
#[inline]
@ -210,7 +229,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -221,7 +240,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = Self;
#[inline]
@ -232,7 +251,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = Self;
#[inline]
@ -243,7 +262,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -254,7 +273,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = Self;
#[inline]
@ -265,7 +284,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = Self;
#[inline]
@ -276,7 +295,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -287,7 +306,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -298,7 +317,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
@ -308,7 +327,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
@ -318,7 +337,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
@ -328,7 +347,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
@ -338,7 +357,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
@ -348,7 +367,7 @@ macro_rules! define_mask {
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
$type: crate::LanesAtMost32,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {

View File

@ -7,7 +7,7 @@ pub use full_masks::*;
mod bitmask;
pub use bitmask::*;
use crate::LanesAtMost64;
use crate::LanesAtMost32;
macro_rules! define_opaque_mask {
{
@ -17,17 +17,39 @@ macro_rules! define_opaque_mask {
} => {
$(#[$attr])*
#[allow(non_camel_case_types)]
pub struct $name<const $lanes: usize>($inner_ty) where $bits_ty: LanesAtMost64;
pub struct $name<const $lanes: usize>($inner_ty) where $bits_ty: LanesAtMost32;
impl<const $lanes: usize> $name<$lanes>
where
$bits_ty: LanesAtMost64
$bits_ty: LanesAtMost32
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
Self(<$inner_ty>::splat(value))
}
/// Converts an array to a SIMD vector.
pub fn from_array(array: [bool; LANES]) -> Self {
let mut vector = Self::splat(false);
let mut i = 0;
while i < $lanes {
vector.set(i, array[i]);
i += 1;
}
vector
}
/// Converts a SIMD vector to an array.
pub fn to_array(self) -> [bool; LANES] {
let mut array = [false; LANES];
let mut i = 0;
while i < $lanes {
array[i] = self.test(i);
i += 1;
}
array
}
/// Tests the value of the specified lane.
///
/// # Panics
@ -49,8 +71,8 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<BitMask<$lanes>> for $name<$lanes>
where
$bits_ty: LanesAtMost64,
BitMask<$lanes>: LanesAtMost64,
$bits_ty: LanesAtMost32,
BitMask<$lanes>: LanesAtMost32,
{
fn from(value: BitMask<$lanes>) -> Self {
Self(value.into())
@ -59,8 +81,8 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<$name<$lanes>> for crate::BitMask<$lanes>
where
$bits_ty: LanesAtMost64,
BitMask<$lanes>: LanesAtMost64,
$bits_ty: LanesAtMost32,
BitMask<$lanes>: LanesAtMost32,
{
fn from(value: $name<$lanes>) -> Self {
value.0.into()
@ -69,7 +91,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<$inner_ty> for $name<$lanes>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
fn from(value: $inner_ty) -> Self {
Self(value)
@ -78,22 +100,35 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<$name<$lanes>> for $inner_ty
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
fn from(value: $name<$lanes>) -> Self {
value.0
}
}
// vector/array conversion
impl<const $lanes: usize> From<[bool; $lanes]> for $name<$lanes> where $bits_ty: crate::LanesAtMost32 {
fn from(array: [bool; $lanes]) -> Self {
Self::from_array(array)
}
}
impl <const $lanes: usize> From<$name<$lanes>> for [bool; $lanes] where $bits_ty: crate::LanesAtMost32 {
fn from(vector: $name<$lanes>) -> Self {
vector.to_array()
}
}
impl<const $lanes: usize> Copy for $name<$lanes>
where
$inner_ty: Copy,
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{}
impl<const $lanes: usize> Clone for $name<$lanes>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn clone(&self) -> Self {
@ -103,7 +138,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> Default for $name<$lanes>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn default() -> Self {
@ -113,7 +148,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> PartialEq for $name<$lanes>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
@ -123,7 +158,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> PartialOrd for $name<$lanes>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
@ -133,7 +168,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> core::fmt::Debug for $name<$lanes>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::Debug::fmt(&self.0, f)
@ -142,7 +177,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -153,7 +188,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -164,7 +199,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -175,7 +210,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -186,7 +221,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -197,7 +232,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -208,7 +243,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -219,7 +254,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = Self;
#[inline]
@ -230,7 +265,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -241,7 +276,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
type Output = $name<LANES>;
#[inline]
@ -252,7 +287,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
@ -262,7 +297,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
@ -272,7 +307,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
@ -282,7 +317,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
@ -292,7 +327,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
@ -302,7 +337,7 @@ macro_rules! define_opaque_mask {
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
$bits_ty: LanesAtMost32,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
@ -360,73 +395,6 @@ define_opaque_mask! {
@bits crate::SimdIsize<LANES>
}
macro_rules! implement_mask_ops {
{ $($vector:ident => $mask:ident ($inner_ty:ident),)* } => {
$(
impl<const LANES: usize> crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$inner_ty<LANES>: LanesAtMost64,
{
/// Test if each lane is equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_eq(&self, other: &Self) -> $mask<LANES> {
unsafe { $mask(crate::intrinsics::simd_eq(self, other)) }
}
/// Test if each lane is not equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_ne(&self, other: &Self) -> $mask<LANES> {
unsafe { $mask(crate::intrinsics::simd_ne(self, other)) }
}
/// Test if each lane is less than the corresponding lane in `other`.
#[inline]
pub fn lanes_lt(&self, other: &Self) -> $mask<LANES> {
unsafe { $mask(crate::intrinsics::simd_lt(self, other)) }
}
/// Test if each lane is greater than the corresponding lane in `other`.
#[inline]
pub fn lanes_gt(&self, other: &Self) -> $mask<LANES> {
unsafe { $mask(crate::intrinsics::simd_gt(self, other)) }
}
/// Test if each lane is less than or equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_le(&self, other: &Self) -> $mask<LANES> {
unsafe { $mask(crate::intrinsics::simd_le(self, other)) }
}
/// Test if each lane is greater than or equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_ge(&self, other: &Self) -> $mask<LANES> {
unsafe { $mask(crate::intrinsics::simd_ge(self, other)) }
}
}
)*
}
}
implement_mask_ops! {
SimdI8 => Mask8 (SimdI8),
SimdI16 => Mask16 (SimdI16),
SimdI32 => Mask32 (SimdI32),
SimdI64 => Mask64 (SimdI64),
SimdI128 => Mask128 (SimdI128),
SimdIsize => MaskSize (SimdIsize),
SimdU8 => Mask8 (SimdI8),
SimdU16 => Mask16 (SimdI16),
SimdU32 => Mask32 (SimdI32),
SimdU64 => Mask64 (SimdI64),
SimdU128 => Mask128 (SimdI128),
SimdUsize => MaskSize (SimdIsize),
SimdF32 => Mask32 (SimdI32),
SimdF64 => Mask64 (SimdI64),
}
/// Vector of eight 8-bit masks
pub type mask8x8 = Mask8<8>;

View File

@ -1,6 +1,6 @@
macro_rules! impl_uint_arith {
($(($name:ident, $n:ty)),+) => {
$( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost64 {
$( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
/// Lanewise saturating add.
///
@ -42,7 +42,7 @@ macro_rules! impl_uint_arith {
macro_rules! impl_int_arith {
($(($name:ident, $n:ty)),+) => {
$( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost64 {
$( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
/// Lanewise saturating add.
///

View File

@ -1,4 +1,4 @@
use crate::LanesAtMost64;
use crate::LanesAtMost32;
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
fn invalid_shift_rhs<T>(rhs: T) -> bool
@ -16,7 +16,7 @@ macro_rules! impl_ref_ops {
{
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
where
$($bound:path: LanesAtMost64,)*
$($bound:path: LanesAtMost32,)*
{
type Output = $output:ty;
@ -26,7 +26,7 @@ macro_rules! impl_ref_ops {
} => {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
type Output = $output;
@ -36,7 +36,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
@ -48,7 +48,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
@ -60,7 +60,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
@ -75,7 +75,7 @@ macro_rules! impl_ref_ops {
{
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
where
$($bound:path: LanesAtMost64,)*
$($bound:path: LanesAtMost32,)*
{
$(#[$attrs:meta])*
fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
@ -83,7 +83,7 @@ macro_rules! impl_ref_ops {
} => {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
@ -91,7 +91,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
@ -104,7 +104,7 @@ macro_rules! impl_ref_ops {
{
impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty
where
$($bound:path: LanesAtMost64,)*
$($bound:path: LanesAtMost32,)*
{
type Output = $output:ty;
fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
@ -112,7 +112,7 @@ macro_rules! impl_ref_ops {
} => {
impl<const $lanes: usize> core::ops::$trait for $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
type Output = $output;
fn $fn($self_tok) -> Self::Output $body
@ -120,7 +120,7 @@ macro_rules! impl_ref_ops {
impl<const $lanes: usize> core::ops::$trait for &'_ $type
where
$($bound: LanesAtMost64,)*
$($bound: LanesAtMost32,)*
{
type Output = <$type as core::ops::$trait>::Output;
fn $fn($self_tok) -> Self::Output {
@ -167,7 +167,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Not for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
type Output = Self;
fn not(self) -> Self::Output {
@ -181,7 +181,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
type Output = Self;
fn neg(self) -> Self::Output {
@ -195,9 +195,9 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::SimdU32<LANES>: LanesAtMost64,
crate::SimdU64<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
crate::SimdU32<LANES>: LanesAtMost32,
crate::SimdU64<LANES>: LanesAtMost32,
{
type Output = Self;
fn neg(self) -> Self::Output {
@ -212,7 +212,7 @@ macro_rules! impl_op {
{ impl Index for $type:ident, $scalar:ty } => {
impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
I: core::slice::SliceIndex<[$scalar]>,
{
type Output = I::Output;
@ -224,7 +224,7 @@ macro_rules! impl_op {
impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
where
Self: LanesAtMost64,
Self: LanesAtMost32,
I: core::slice::SliceIndex<[$scalar]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
@ -239,7 +239,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<Self> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
type Output = Self;
@ -255,7 +255,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<$scalar> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
type Output = Self;
@ -269,7 +269,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<crate::$type<LANES>> for $scalar
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
type Output = crate::$type<LANES>;
@ -283,7 +283,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$assign_trait<Self> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
#[inline]
fn $assign_trait_fn(&mut self, rhs: Self) {
@ -297,7 +297,7 @@ macro_rules! impl_op {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$assign_trait<$scalar> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::$type<LANES>: LanesAtMost32,
{
#[inline]
fn $assign_trait_fn(&mut self, rhs: $scalar) {
@ -343,7 +343,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -371,7 +371,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -394,7 +394,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<crate::$vector<LANES>> for $scalar
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = crate::$vector<LANES>;
@ -408,7 +408,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::DivAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn div_assign(&mut self, rhs: Self) {
@ -420,7 +420,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::DivAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn div_assign(&mut self, rhs: $scalar) {
@ -433,7 +433,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -461,7 +461,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -484,7 +484,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<crate::$vector<LANES>> for $scalar
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = crate::$vector<LANES>;
@ -498,7 +498,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::RemAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn rem_assign(&mut self, rhs: Self) {
@ -510,7 +510,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::RemAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn rem_assign(&mut self, rhs: $scalar) {
@ -523,7 +523,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shl<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -545,7 +545,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shl<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -564,7 +564,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShlAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn shl_assign(&mut self, rhs: Self) {
@ -576,7 +576,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShlAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn shl_assign(&mut self, rhs: $scalar) {
@ -588,7 +588,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shr<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -610,7 +610,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shr<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
type Output = Self;
@ -629,7 +629,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShrAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn shr_assign(&mut self, rhs: Self) {
@ -641,7 +641,7 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShrAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$vector<LANES>: LanesAtMost32,
{
#[inline]
fn shr_assign(&mut self, rhs: $scalar) {

View File

@ -24,6 +24,5 @@ macro_rules! impl_shuffle_2pow_lanes {
impl_shuffle_lane!{ $name, simd_shuffle8, 8 }
impl_shuffle_lane!{ $name, simd_shuffle16, 16 }
impl_shuffle_lane!{ $name, simd_shuffle32, 32 }
impl_shuffle_lane!{ $name, simd_shuffle64, 64 }
}
}

View File

@ -4,7 +4,7 @@ macro_rules! implement {
} => {
impl<const LANES: usize> crate::$type<LANES>
where
Self: crate::LanesAtMost64,
Self: crate::LanesAtMost32,
{
/// Returns the largest integer less than or equal to each lane.
#[cfg(feature = "std")]
@ -25,8 +25,8 @@ macro_rules! implement {
impl<const LANES: usize> crate::$type<LANES>
where
Self: crate::LanesAtMost64,
crate::$int_type<LANES>: crate::LanesAtMost64,
Self: crate::LanesAtMost32,
crate::$int_type<LANES>: crate::LanesAtMost32,
{
/// Rounds toward zero and converts to the same-width integer type, assuming that
/// the value is finite and fits in that type.

View File

@ -4,13 +4,13 @@
/// `$lanes` of float `$type`, which uses `$bits_ty` as its binary
/// representation. Called from `define_float_vector!`.
macro_rules! impl_float_vector {
{ $name:ident, $type:ty, $bits_ty:ident } => {
{ $name:ident, $type:ty, $bits_ty:ident, $mask_ty:ident, $mask_impl_ty:ident } => {
impl_vector! { $name, $type }
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost64,
crate::$bits_ty<LANES>: crate::LanesAtMost64,
Self: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
{
/// Raw transmutation to an unsigned integer vector type with the
/// same size and number of lanes.
@ -36,17 +36,69 @@ macro_rules! impl_float_vector {
Self::from_bits(self.to_bits() & no_sign)
}
}
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost32,
crate::$bits_ty<LANES>: crate::LanesAtMost32,
crate::$mask_impl_ty<LANES>: crate::LanesAtMost32,
{
/// Returns true for each lane if it has a positive sign, including
/// `+0.0`, `NaN`s with positive sign bit and positive infinity.
#[inline]
pub fn is_sign_positive(self) -> crate::$mask_ty<LANES> {
!self.is_sign_negative()
}
/// Returns true for each lane if it has a negative sign, including
/// `-0.0`, `NaN`s with negative sign bit and negative infinity.
#[inline]
pub fn is_sign_negative(self) -> crate::$mask_ty<LANES> {
let sign_bits = self.to_bits() & crate::$bits_ty::splat((!0 >> 1) + 1);
sign_bits.lanes_gt(crate::$bits_ty::splat(0))
}
/// Returns true for each lane if its value is `NaN`.
#[inline]
pub fn is_nan(self) -> crate::$mask_ty<LANES> {
self.lanes_ne(self)
}
/// Returns true for each lane if its value is positive infinity or negative infinity.
#[inline]
pub fn is_infinite(self) -> crate::$mask_ty<LANES> {
self.abs().lanes_eq(Self::splat(<$type>::INFINITY))
}
/// Returns true for each lane if its value is neither infinite nor `NaN`.
#[inline]
pub fn is_finite(self) -> crate::$mask_ty<LANES> {
self.abs().lanes_lt(Self::splat(<$type>::INFINITY))
}
/// Returns true for each lane if its value is subnormal.
#[inline]
pub fn is_subnormal(self) -> crate::$mask_ty<LANES> {
self.abs().lanes_ne(Self::splat(0.0)) & (self.to_bits() & Self::splat(<$type>::INFINITY).to_bits()).lanes_eq(crate::$bits_ty::splat(0))
}
/// Returns true for each lane if its value is neither neither zero, infinite,
/// subnormal, or `NaN`.
#[inline]
pub fn is_normal(self) -> crate::$mask_ty<LANES> {
!(self.abs().lanes_eq(Self::splat(0.0)) | self.is_nan() | self.is_subnormal() | self.is_infinite())
}
}
};
}
/// A SIMD vector of containing `LANES` `f32` values.
#[repr(simd)]
pub struct SimdF32<const LANES: usize>([f32; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_float_vector! { SimdF32, f32, SimdU32 }
impl_float_vector! { SimdF32, f32, SimdU32, Mask32, SimdI32 }
from_transmute_x86! { unsafe f32x4 => __m128 }
from_transmute_x86! { unsafe f32x8 => __m256 }
@ -56,9 +108,9 @@ from_transmute_x86! { unsafe f32x8 => __m256 }
#[repr(simd)]
pub struct SimdF64<const LANES: usize>([f64; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_float_vector! { SimdF64, f64, SimdU64 }
impl_float_vector! { SimdF64, f64, SimdU64, Mask64, SimdI64 }
from_transmute_x86! { unsafe f64x2 => __m128d }
from_transmute_x86! { unsafe f64x4 => __m256d }

View File

@ -2,12 +2,12 @@
/// Implements additional integer traits (Eq, Ord, Hash) on the specified vector `$name`, holding multiple `$lanes` of `$type`.
macro_rules! impl_integer_vector {
{ $name:ident, $type:ty } => {
{ $name:ident, $type:ty, $mask_ty:ident, $mask_impl_ty:ident } => {
impl_vector! { $name, $type }
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost64 {}
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
// TODO use SIMD cmp
@ -15,7 +15,7 @@ macro_rules! impl_integer_vector {
}
}
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn hash<H>(&self, state: &mut H)
where
@ -24,6 +24,22 @@ macro_rules! impl_integer_vector {
self.as_slice().hash(state)
}
}
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost32,
crate::$mask_impl_ty<LANES>: crate::LanesAtMost32,
{
/// Returns true for each positive lane and false if it is zero or negative.
pub fn is_positive(self) -> crate::$mask_ty<LANES> {
self.lanes_gt(Self::splat(0))
}
/// Returns true for each negative lane and false if it is zero or positive.
pub fn is_negative(self) -> crate::$mask_ty<LANES> {
self.lanes_lt(Self::splat(0))
}
}
}
}
@ -31,9 +47,9 @@ macro_rules! impl_integer_vector {
#[repr(simd)]
pub struct SimdIsize<const LANES: usize>([isize; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_integer_vector! { SimdIsize, isize }
impl_integer_vector! { SimdIsize, isize, MaskSize, SimdIsize }
#[cfg(target_pointer_width = "32")]
from_transmute_x86! { unsafe isizex4 => __m128i }
@ -51,9 +67,9 @@ from_transmute_x86! { unsafe isizex4 => __m256i }
#[repr(simd)]
pub struct SimdI128<const LANES: usize>([i128; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_integer_vector! { SimdI128, i128 }
impl_integer_vector! { SimdI128, i128, Mask128, SimdI128 }
from_transmute_x86! { unsafe i128x2 => __m256i }
//from_transmute_x86! { unsafe i128x4 => __m512i }
@ -62,9 +78,9 @@ from_transmute_x86! { unsafe i128x2 => __m256i }
#[repr(simd)]
pub struct SimdI16<const LANES: usize>([i16; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_integer_vector! { SimdI16, i16 }
impl_integer_vector! { SimdI16, i16, Mask16, SimdI16 }
from_transmute_x86! { unsafe i16x8 => __m128i }
from_transmute_x86! { unsafe i16x16 => __m256i }
@ -74,9 +90,9 @@ from_transmute_x86! { unsafe i16x16 => __m256i }
#[repr(simd)]
pub struct SimdI32<const LANES: usize>([i32; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_integer_vector! { SimdI32, i32 }
impl_integer_vector! { SimdI32, i32, Mask32, SimdI32 }
from_transmute_x86! { unsafe i32x4 => __m128i }
from_transmute_x86! { unsafe i32x8 => __m256i }
@ -86,9 +102,9 @@ from_transmute_x86! { unsafe i32x8 => __m256i }
#[repr(simd)]
pub struct SimdI64<const LANES: usize>([i64; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_integer_vector! { SimdI64, i64 }
impl_integer_vector! { SimdI64, i64, Mask64, SimdI64 }
from_transmute_x86! { unsafe i64x2 => __m128i }
from_transmute_x86! { unsafe i64x4 => __m256i }
@ -98,9 +114,9 @@ from_transmute_x86! { unsafe i64x4 => __m256i }
#[repr(simd)]
pub struct SimdI8<const LANES: usize>([i8; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_integer_vector! { SimdI8, i8 }
impl_integer_vector! { SimdI8, i8, Mask8, SimdI8 }
from_transmute_x86! { unsafe i8x16 => __m128i }
from_transmute_x86! { unsafe i8x32 => __m256i }

View File

@ -6,9 +6,9 @@ macro_rules! impl_unsigned_vector {
{ $name:ident, $type:ty } => {
impl_vector! { $name, $type }
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost64 {}
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost32 {}
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
// TODO use SIMD cmp
@ -16,7 +16,7 @@ macro_rules! impl_unsigned_vector {
}
}
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost64 {
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost32 {
#[inline]
fn hash<H>(&self, state: &mut H)
where
@ -32,7 +32,7 @@ macro_rules! impl_unsigned_vector {
#[repr(simd)]
pub struct SimdUsize<const LANES: usize>([usize; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_unsigned_vector! { SimdUsize, usize }
@ -52,7 +52,7 @@ from_transmute_x86! { unsafe usizex4 => __m256i }
#[repr(simd)]
pub struct SimdU128<const LANES: usize>([u128; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_unsigned_vector! { SimdU128, u128 }
@ -63,7 +63,7 @@ from_transmute_x86! { unsafe u128x2 => __m256i }
#[repr(simd)]
pub struct SimdU16<const LANES: usize>([u16; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_unsigned_vector! { SimdU16, u16 }
@ -75,7 +75,7 @@ from_transmute_x86! { unsafe u16x16 => __m256i }
#[repr(simd)]
pub struct SimdU32<const LANES: usize>([u32; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_unsigned_vector! { SimdU32, u32 }
@ -87,7 +87,7 @@ from_transmute_x86! { unsafe u32x8 => __m256i }
#[repr(simd)]
pub struct SimdU64<const LANES: usize>([u64; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_unsigned_vector! { SimdU64, u64 }
@ -99,7 +99,7 @@ from_transmute_x86! { unsafe u64x4 => __m256i }
#[repr(simd)]
pub struct SimdU8<const LANES: usize>([u8; LANES])
where
Self: crate::LanesAtMost64;
Self: crate::LanesAtMost32;
impl_unsigned_vector! { SimdU8, u8 }

View File

@ -1,3 +1,5 @@
#![feature(is_subnormal)]
#[macro_use]
mod ops_macros;
impl_float_tests! { SimdF32, f32, i32 }

View File

@ -1,3 +1,5 @@
#![feature(is_subnormal)]
#[macro_use]
mod ops_macros;
impl_float_tests! { SimdF64, f64, i64 }

View File

@ -1,4 +1,3 @@
mask_tests! { mask8x8, 8 }
mask_tests! { mask8x16, 16 }
mask_tests! { mask8x32, 32 }
mask_tests! { mask8x64, 64 }

View File

@ -147,11 +147,27 @@ macro_rules! impl_signed_tests {
test_helpers::test_lanes! {
fn neg<const LANES: usize>() {
test_helpers::test_unary_elementwise(
&<Vector<LANES> as core::ops::Neg>::neg,
&<Vector::<LANES> as core::ops::Neg>::neg,
&<Scalar as core::ops::Neg>::neg,
&|x| !x.contains(&Scalar::MIN),
);
}
fn is_positive<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_positive,
&Scalar::is_positive,
&|_| true,
);
}
fn is_negative<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_negative,
&Scalar::is_negative,
&|_| true,
);
}
}
test_helpers::test_lanes_panic! {
@ -285,6 +301,62 @@ macro_rules! impl_float_tests {
}
test_helpers::test_lanes! {
fn is_sign_positive<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_sign_positive,
&Scalar::is_sign_positive,
&|_| true,
);
}
fn is_sign_negative<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_sign_negative,
&Scalar::is_sign_negative,
&|_| true,
);
}
fn is_finite<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_finite,
&Scalar::is_finite,
&|_| true,
);
}
fn is_infinite<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_infinite,
&Scalar::is_infinite,
&|_| true,
);
}
fn is_nan<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_nan,
&Scalar::is_nan,
&|_| true,
);
}
fn is_normal<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_normal,
&Scalar::is_normal,
&|_| true,
);
}
fn is_subnormal<const LANES: usize>() {
test_helpers::test_unary_mask_elementwise(
&Vector::<LANES>::is_subnormal,
&Scalar::is_subnormal,
&|_| true,
);
}
fn abs<const LANES: usize>() {
test_helpers::test_unary_elementwise(
&Vector::<LANES>::abs,

View File

@ -5,6 +5,16 @@ pub trait BitEq {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result;
}
impl BitEq for bool {
fn biteq(&self, other: &Self) -> bool {
self == other
}
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "{:?}", self)
}
}
macro_rules! impl_integer_biteq {
{ $($type:ty),* } => {
$(

View File

@ -124,6 +124,32 @@ pub fn test_unary_elementwise<Scalar, ScalarResult, Vector, VectorResult, const
});
}
/// Test a unary vector function against a unary scalar function, applied elementwise.
#[inline(never)]
pub fn test_unary_mask_elementwise<Scalar, Vector, Mask, const LANES: usize>(
fv: &dyn Fn(Vector) -> Mask,
fs: &dyn Fn(Scalar) -> bool,
check: &dyn Fn([Scalar; LANES]) -> bool,
) where
Scalar: Copy + Default + core::fmt::Debug + DefaultStrategy,
Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
Mask: Into<[bool; LANES]> + From<[bool; LANES]> + Copy,
{
test_1(&|x: [Scalar; LANES]| {
proptest::prop_assume!(check(x));
let result_1: [bool; LANES] = fv(x.into()).into();
let result_2: [bool; LANES] = {
let mut result = [false; LANES];
for (i, o) in x.iter().zip(result.iter_mut()) {
*o = fs(*i);
}
result
};
crate::prop_assert_biteq!(result_1, result_2);
Ok(())
});
}
/// Test a binary vector function against a binary scalar function, applied elementwise.
#[inline(never)]
pub fn test_binary_elementwise<
@ -243,21 +269,21 @@ macro_rules! test_lanes {
fn implementation<const $lanes: usize>()
where
core_simd::SimdU8<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU16<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU32<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU64<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU128<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI8<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI16<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI32<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI64<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI128<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdF32<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdF64<$lanes>: core_simd::LanesAtMost64,
core_simd::BitMask<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU128<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI128<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF64<$lanes>: core_simd::LanesAtMost32,
core_simd::BitMask<$lanes>: core_simd::LanesAtMost32,
$body
#[cfg(target_arch = "wasm32")]
@ -298,16 +324,10 @@ macro_rules! test_lanes {
fn lanes_32() {
implementation::<32>();
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
fn lanes_64() {
implementation::<64>();
}
}
)*
}
}
}
/// Expand a const-generic `#[should_panic]` test into separate tests for each possible lane count.
#[macro_export]
@ -321,21 +341,21 @@ macro_rules! test_lanes_panic {
fn implementation<const $lanes: usize>()
where
core_simd::SimdU8<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU16<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU32<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU64<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU128<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI8<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI16<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI32<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI64<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdI128<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdF32<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdF64<$lanes>: core_simd::LanesAtMost64,
core_simd::BitMask<$lanes>: core_simd::LanesAtMost64,
core_simd::SimdU8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdU128<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI8<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI16<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI64<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdI128<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF32<$lanes>: core_simd::LanesAtMost32,
core_simd::SimdF64<$lanes>: core_simd::LanesAtMost32,
core_simd::BitMask<$lanes>: core_simd::LanesAtMost32,
$body
#[test]
@ -373,13 +393,7 @@ macro_rules! test_lanes_panic {
fn lanes_32() {
implementation::<32>();
}
#[test]
#[should_panic]
fn lanes_64() {
implementation::<64>();
}
}
)*
}
}
}