Sync portable-simd to 2023 May 10
Sync up to rust-lang/portable-simd@852762563a
commit b05d7e5bfa
@@ -241,6 +241,10 @@ jobs:
        - "--features std"
        - "--features generic_const_exprs"
        - "--features std --features generic_const_exprs"
        - "--features all_lane_counts"
        - "--features all_lane_counts --features std"
        - "--features all_lane_counts --features generic_const_exprs"
        - "--features all_lane_counts --features std --features generic_const_exprs"

    steps:
      - uses: actions/checkout@v2
@@ -24,19 +24,10 @@ or by setting up `rustup default nightly` or else with `cargo +nightly {build,test,run}`.
```bash
cargo new hellosimd
```
to create a new crate. Edit `hellosimd/Cargo.toml` to be
```toml
[package]
name = "hellosimd"
version = "0.1.0"
edition = "2018"
[dependencies]
core_simd = { git = "https://github.com/rust-lang/portable-simd" }
```

and finally write this in `src/main.rs`:
to create a new crate. Finally write this in `src/main.rs`:
```rust
use core_simd::*;
#![feature(portable_simd)]
use std::simd::f32x4;
fn main() {
    let a = f32x4::splat(10.0);
    let b = f32x4::from_array([1.0, 2.0, 3.0, 4.0]);
@@ -44,24 +35,23 @@ fn main() {
}
```

Explanation: We import all the bindings from the crate with the first line. Then, we construct our SIMD vectors with methods like `splat` or `from_array`. Finally, we can use operators on them like `+` and the appropriate SIMD instructions will be carried out. When we run `cargo run` you should get `[11.0, 12.0, 13.0, 14.0]`.
Explanation: We construct our SIMD vectors with methods like `splat` or `from_array`. Next, we can use operators like `+` on them, and the appropriate SIMD instructions will be carried out. When we run `cargo run` you should get `[11.0, 12.0, 13.0, 14.0]`.

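For reference, here is the updated example assembled from the hunks above as a single file; the `println!` line falls outside the visible hunk, so its exact form is an assumption:

```rust
#![feature(portable_simd)]
use std::simd::f32x4;

fn main() {
    let a = f32x4::splat(10.0);
    let b = f32x4::from_array([1.0, 2.0, 3.0, 4.0]);
    // Assumed output line (not shown in the hunk): prints the lane-wise sum.
    println!("{:?}", a + b); // [11.0, 12.0, 13.0, 14.0]
}
```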
## Code Organization
## Supported vectors

Currently the crate is organized so that each element type is a file, and then the 64-bit, 128-bit, 256-bit, and 512-bit vectors using those types are contained in said file.

All types are then exported as a single, flat module.
Currently, vectors may have up to 64 elements, but aliases are provided only up to 512-bit vectors.

Depending on the size of the primitive type, the number of lanes the vector will have varies. For example, 128-bit vectors have four `f32` lanes and two `f64` lanes.

The supported element types are as follows:
* **Floating Point:** `f32`, `f64`
* **Signed Integers:** `i8`, `i16`, `i32`, `i64`, `i128`, `isize`
* **Unsigned Integers:** `u8`, `u16`, `u32`, `u64`, `u128`, `usize`
* **Masks:** `mask8`, `mask16`, `mask32`, `mask64`, `mask128`, `masksize`
* **Signed Integers:** `i8`, `i16`, `i32`, `i64`, `isize` (`i128` excluded)
* **Unsigned Integers:** `u8`, `u16`, `u32`, `u64`, `usize` (`u128` excluded)
* **Pointers:** `*const T` and `*mut T` (zero-sized metadata only)
* **Masks:** 8-bit, 16-bit, 32-bit, 64-bit, and `usize`-sized masks

Floating point, signed integers, and unsigned integers are the [primitive types](https://doc.rust-lang.org/core/primitive/index.html) you're already used to.
The `mask` types are "truthy" values, but they use the number of bits in their name instead of just 1 bit like a normal `bool` uses.
Floating point, signed integers, unsigned integers, and pointers are the [primitive types](https://doc.rust-lang.org/core/primitive/index.html) you're already used to.
The mask types have elements that are "truthy" values, like `bool`, but have an unspecified layout because different architectures prefer different layouts for mask types.

[simd-guide]: ./beginners-guide.md
[zulip-project-portable-simd]: https://rust-lang.zulipchat.com/#narrow/stream/257879-project-portable-simd
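To make the mask description above concrete, here is a minimal nightly-only sketch (assuming the usual `std::simd` re-exports) of comparing a vector lane-wise and using the resulting mask to select lanes:

```rust
#![feature(portable_simd)]
use std::simd::{f32x4, mask32x4, SimdPartialOrd};

fn main() {
    let v = f32x4::from_array([1.0, -2.0, 3.0, -4.0]);
    // Lane-wise comparison yields a mask with one "truthy" lane per match.
    let positive: mask32x4 = v.simd_gt(f32x4::splat(0.0));
    // select() picks from the first vector where the mask is true.
    let clamped = positive.select(v, f32x4::splat(0.0));
    assert_eq!(clamped.to_array(), [1.0, 0.0, 3.0, 0.0]);
}
```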
@@ -13,12 +13,11 @@ default = ["as_crate"]
as_crate = []
std = []
generic_const_exprs = []
all_lane_counts = []

[target.'cfg(target_arch = "wasm32")'.dev-dependencies.wasm-bindgen]
version = "0.2"

[dev-dependencies.wasm-bindgen-test]
version = "0.3"
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
wasm-bindgen = "0.2"
wasm-bindgen-test = "0.3"

[dev-dependencies.proptest]
version = "0.10"
library/portable-simd/crates/core_simd/examples/README.md (new file, 13 lines)
@@ -0,0 +1,13 @@
### `stdsimd` examples

This crate is a port of example uses of `stdsimd`, mostly taken from the `packed_simd` crate.

The examples contain, as in the case of `dot_product.rs`, multiple ways of solving the problem, in order to show idiomatic uses of SIMD and the iterative refinement of performance designs.

Run the tests with the command

```
cargo run --example dot_product
```

and verify the code for `dot_product.rs` on your machine.
library/portable-simd/crates/core_simd/examples/dot_product.rs (new file, 169 lines)
@@ -0,0 +1,169 @@
// Code taken from the `packed_simd` crate.
// Run this code with `cargo test --example dot_product`.
//use std::iter::zip;

#![feature(array_chunks)]
#![feature(slice_as_chunks)]
// Add these imports to use the stdsimd library
#![feature(portable_simd)]
use core_simd::simd::*;

// This is your barebones dot product implementation:
// Take 2 vectors, multiply them element wise and *then*
// go along the resulting array and add up the result.
// In the next example we will see if there
// is any difference to adding and multiplying in tandem.
pub fn dot_prod_scalar_0(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());

    a.iter().zip(b.iter()).map(|(a, b)| a * b).sum()
}

// When dealing with SIMD, it is very important to think about the amount
// of data movement and when it happens. We're going over simple computation examples here, and yet
// it is not trivial to understand what may or may not contribute to performance
// changes. Eventually, you will need tools to inspect the generated assembly and confirm your
// hypothesis and benchmarks - we will mention them later on.
// With the use of `fold`, we're doing a multiplication,
// and then adding it to the sum, one element from both vectors at a time.
pub fn dot_prod_scalar_1(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    a.iter()
        .zip(b.iter())
        .fold(0.0, |a, zipped| a + zipped.0 * zipped.1)
}

// We now move on to the SIMD implementations: notice the following constructs:
// `array_chunks::<4>`: mapping this over the vector will let us construct SIMD vectors
// `f32x4::from_array`: construct the SIMD vector from a slice
// `(a * b).reduce_sum()`: Multiply both f32x4 vectors together, and then reduce them.
// This approach essentially uses SIMD to produce a vector of length N/4 of all the products,
// and then adds those with `sum()`. This is suboptimal.
// TODO: ASCII diagrams
pub fn dot_prod_simd_0(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    // TODO handle remainder when a.len() % 4 != 0
    a.array_chunks::<4>()
        .map(|&a| f32x4::from_array(a))
        .zip(b.array_chunks::<4>().map(|&b| f32x4::from_array(b)))
        .map(|(a, b)| (a * b).reduce_sum())
        .sum()
}

// There are some simple ways to improve the previous code:
// 1. Make a `zero` `f32x4` SIMD vector that we will be accumulating into,
//    so that there is only one `sum()` reduction when the last `f32x4` has been processed.
// 2. Exploit fused multiply-add so that the multiplication, addition and sinking into the reduction
//    happen in the same step.
// If the arrays are large, minimizing the data shuffling will lead to great perf.
// If the arrays are small, handling the remainder elements when the length isn't a multiple of 4
// can become a problem.
pub fn dot_prod_simd_1(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    // TODO handle remainder when a.len() % 4 != 0
    a.array_chunks::<4>()
        .map(|&a| f32x4::from_array(a))
        .zip(b.array_chunks::<4>().map(|&b| f32x4::from_array(b)))
        .fold(f32x4::splat(0.0), |acc, zipped| acc + zipped.0 * zipped.1)
        .reduce_sum()
}

// A lot of knowledgeable use of SIMD comes from knowing specific instructions that are
// available - let's try to use the `mul_add` instruction, which is the fused multiply-add we were looking for.
use std_float::StdFloat;
pub fn dot_prod_simd_2(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    // TODO handle remainder when a.len() % 4 != 0
    let mut res = f32x4::splat(0.0);
    a.array_chunks::<4>()
        .map(|&a| f32x4::from_array(a))
        .zip(b.array_chunks::<4>().map(|&b| f32x4::from_array(b)))
        .for_each(|(a, b)| {
            res = a.mul_add(b, res);
        });
    res.reduce_sum()
}

// Finally, we will write the same operation but handling the loop remainder.
const LANES: usize = 4;
pub fn dot_prod_simd_3(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());

    let (a_extra, a_chunks) = a.as_rchunks();
    let (b_extra, b_chunks) = b.as_rchunks();

    // These are always true, but for emphasis:
    assert_eq!(a_chunks.len(), b_chunks.len());
    assert_eq!(a_extra.len(), b_extra.len());

    let mut sums = [0.0; LANES];
    for ((x, y), d) in std::iter::zip(a_extra, b_extra).zip(&mut sums) {
        *d = x * y;
    }

    let mut sums = f32x4::from_array(sums);
    std::iter::zip(a_chunks, b_chunks).for_each(|(x, y)| {
        sums += f32x4::from_array(*x) * f32x4::from_array(*y);
    });

    sums.reduce_sum()
}

// Finally, we present an iterator version for handling remainders in a scalar fashion at the end of the loop.
// Unfortunately, this is allocating 1 `XMM` register on the order of `~len(a)` - we'll see how we can get around it in the
// next example.
pub fn dot_prod_simd_4(a: &[f32], b: &[f32]) -> f32 {
    let mut sum = a
        .array_chunks::<4>()
        .map(|&a| f32x4::from_array(a))
        .zip(b.array_chunks::<4>().map(|&b| f32x4::from_array(b)))
        .map(|(a, b)| a * b)
        .fold(f32x4::splat(0.0), std::ops::Add::add)
        .reduce_sum();
    let remain = a.len() - (a.len() % 4);
    sum += a[remain..]
        .iter()
        .zip(&b[remain..])
        .map(|(a, b)| a * b)
        .sum::<f32>();
    sum
}

// This version allocates a single `XMM` register for accumulation, and the folds don't allocate on top of that.
// Notice the use of `mul_add`, which can do a multiply and an add operation per iteration.
pub fn dot_prod_simd_5(a: &[f32], b: &[f32]) -> f32 {
    a.array_chunks::<4>()
        .map(|&a| f32x4::from_array(a))
        .zip(b.array_chunks::<4>().map(|&b| f32x4::from_array(b)))
        .fold(f32x4::splat(0.), |acc, (a, b)| a.mul_add(b, acc))
        .reduce_sum()
}

fn main() {
    // Empty main to make cargo happy
}

#[cfg(test)]
mod tests {
    #[test]
    fn smoke_test() {
        use super::*;
        let a: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let b: Vec<f32> = vec![-8.0, -7.0, -6.0, -5.0, 4.0, 3.0, 2.0, 1.0];
        let x: Vec<f32> = [0.5; 1003].to_vec();
        let y: Vec<f32> = [2.0; 1003].to_vec();

        // Basic check
        assert_eq!(0.0, dot_prod_scalar_0(&a, &b));
        assert_eq!(0.0, dot_prod_scalar_1(&a, &b));
        assert_eq!(0.0, dot_prod_simd_0(&a, &b));
        assert_eq!(0.0, dot_prod_simd_1(&a, &b));
        assert_eq!(0.0, dot_prod_simd_2(&a, &b));
        assert_eq!(0.0, dot_prod_simd_3(&a, &b));
        assert_eq!(0.0, dot_prod_simd_4(&a, &b));
        assert_eq!(0.0, dot_prod_simd_5(&a, &b));

        // We can handle vectors that are non-multiples of 4
        assert_eq!(1003.0, dot_prod_simd_3(&x, &y));
    }
}
library/portable-simd/crates/core_simd/src/alias.rs (new file, 227 lines)
@@ -0,0 +1,227 @@
macro_rules! number {
    { 1 } => { "one" };
    { 2 } => { "two" };
    { 4 } => { "four" };
    { 8 } => { "eight" };
    { $x:literal } => { stringify!($x) };
}

macro_rules! plural {
    { 1 } => { "" };
    { $x:literal } => { "s" };
}

macro_rules! alias {
    {
        $(
            $element_ty:ty = {
                $($alias:ident $num_elements:tt)*
            }
        )*
    } => {
        $(
            $(
                #[doc = concat!("A SIMD vector with ", number!($num_elements), " element", plural!($num_elements), " of type [`", stringify!($element_ty), "`].")]
                #[allow(non_camel_case_types)]
                pub type $alias = $crate::simd::Simd<$element_ty, $num_elements>;
            )*
        )*
    }
}

macro_rules! mask_alias {
    {
        $(
            $element_ty:ty : $size:literal = {
                $($alias:ident $num_elements:tt)*
            }
        )*
    } => {
        $(
            $(
                #[doc = concat!("A SIMD mask with ", number!($num_elements), " element", plural!($num_elements), " for vectors with ", $size, " element types.")]
                ///
                #[doc = concat!(
                    "The layout of this type is unspecified, and may change between platforms and/or Rust versions, and code should not assume that it is equivalent to `[",
                    stringify!($element_ty), "; ", $num_elements, "]`."
                )]
                #[allow(non_camel_case_types)]
                pub type $alias = $crate::simd::Mask<$element_ty, $num_elements>;
            )*
        )*
    }
}

alias! {
    i8 = {
        i8x1 1
        i8x2 2
        i8x4 4
        i8x8 8
        i8x16 16
        i8x32 32
        i8x64 64
    }

    i16 = {
        i16x1 1
        i16x2 2
        i16x4 4
        i16x8 8
        i16x16 16
        i16x32 32
        i16x64 64
    }

    i32 = {
        i32x1 1
        i32x2 2
        i32x4 4
        i32x8 8
        i32x16 16
        i32x32 32
        i32x64 64
    }

    i64 = {
        i64x1 1
        i64x2 2
        i64x4 4
        i64x8 8
        i64x16 16
        i64x32 32
        i64x64 64
    }

    isize = {
        isizex1 1
        isizex2 2
        isizex4 4
        isizex8 8
        isizex16 16
        isizex32 32
        isizex64 64
    }

    u8 = {
        u8x1 1
        u8x2 2
        u8x4 4
        u8x8 8
        u8x16 16
        u8x32 32
        u8x64 64
    }

    u16 = {
        u16x1 1
        u16x2 2
        u16x4 4
        u16x8 8
        u16x16 16
        u16x32 32
        u16x64 64
    }

    u32 = {
        u32x1 1
        u32x2 2
        u32x4 4
        u32x8 8
        u32x16 16
        u32x32 32
        u32x64 64
    }

    u64 = {
        u64x1 1
        u64x2 2
        u64x4 4
        u64x8 8
        u64x16 16
        u64x32 32
        u64x64 64
    }

    usize = {
        usizex1 1
        usizex2 2
        usizex4 4
        usizex8 8
        usizex16 16
        usizex32 32
        usizex64 64
    }

    f32 = {
        f32x1 1
        f32x2 2
        f32x4 4
        f32x8 8
        f32x16 16
        f32x32 32
        f32x64 64
    }

    f64 = {
        f64x1 1
        f64x2 2
        f64x4 4
        f64x8 8
        f64x16 16
        f64x32 32
        f64x64 64
    }
}

mask_alias! {
    i8 : "8-bit" = {
        mask8x1 1
        mask8x2 2
        mask8x4 4
        mask8x8 8
        mask8x16 16
        mask8x32 32
        mask8x64 64
    }

    i16 : "16-bit" = {
        mask16x1 1
        mask16x2 2
        mask16x4 4
        mask16x8 8
        mask16x16 16
        mask16x32 32
        mask16x64 64
    }

    i32 : "32-bit" = {
        mask32x1 1
        mask32x2 2
        mask32x4 4
        mask32x8 8
        mask32x16 16
        mask32x32 32
        mask32x64 64
    }

    i64 : "64-bit" = {
        mask64x1 1
        mask64x2 2
        mask64x4 4
        mask64x8 8
        mask64x16 16
        mask64x32 32
        mask64x64 64
    }

    isize : "pointer-sized" = {
        masksizex1 1
        masksizex2 2
        masksizex4 4
        masksizex8 8
        masksizex16 16
        masksizex32 32
        masksizex64 64
    }
}
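As a quick illustration (a sketch, not part of the diff): every alias generated here is a plain type synonym, so the following holds on nightly, assuming the usual `std::simd` re-exports:

```rust
#![feature(portable_simd)]
use std::simd::{f32x4, mask32x4, Mask, Simd};

fn main() {
    // alias! expands `f32x4` to `Simd<f32, 4>`...
    let v: Simd<f32, 4> = f32x4::splat(1.0);
    // ...and mask_alias! expands `mask32x4` to `Mask<i32, 4>`.
    let m: Mask<i32, 4> = mask32x4::splat(true);
    assert_eq!(v.to_array(), [1.0; 4]);
    assert!(m.all());
}
```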
library/portable-simd/crates/core_simd/src/cast.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
use crate::simd::SimdElement;

/// Supporting trait for `Simd::cast`. Typically doesn't need to be used directly.
///
/// # Safety
/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast` or
/// `simd_as` intrinsics.
pub unsafe trait SimdCast: SimdElement {}

// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for i8 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for i16 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for i32 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for i64 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for isize {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for u8 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for u16 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for u32 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for u64 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for usize {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for f32 {}
// Safety: primitive number types can be cast to other primitive number types
unsafe impl SimdCast for f64 {}

/// Supporting trait for `Simd::cast_ptr`. Typically doesn't need to be used directly.
///
/// # Safety
/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast_ptr`
/// intrinsic.
pub unsafe trait SimdCastPtr<T> {}

// Safety: pointers can be cast to other pointer types
unsafe impl<T, U> SimdCastPtr<T> for *const U
where
    U: core::ptr::Pointee,
    T: core::ptr::Pointee<Metadata = U::Metadata>,
{
}
// Safety: pointers can be cast to other pointer types
unsafe impl<T, U> SimdCastPtr<T> for *mut U
where
    U: core::ptr::Pointee,
    T: core::ptr::Pointee<Metadata = U::Metadata>,
{
}
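A short sketch (assuming nightly `std::simd`, where `Simd::cast` follows `as`-style lane-wise conversion semantics) of the kind of cast this trait supports:

```rust
#![feature(portable_simd)]
use std::simd::{f32x4, Simd};

fn main() {
    let floats = f32x4::from_array([1.9, -2.5, 0.5, 10.0]);
    // Lane-wise `as` conversion: floats are truncated toward zero.
    let ints: Simd<i32, 4> = floats.cast();
    assert_eq!(ints.to_array(), [1, -2, 0, 10]);
}
```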
@@ -1,11 +1,15 @@
mod const_ptr;
mod float;
mod int;
mod mut_ptr;
mod uint;

mod sealed {
    pub trait Sealed {}
}

pub use const_ptr::*;
pub use float::*;
pub use int::*;
pub use mut_ptr::*;
pub use uint::*;
library/portable-simd/crates/core_simd/src/elements/const_ptr.rs (new file, 141 lines)
@@ -0,0 +1,141 @@
use super::sealed::Sealed;
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};

/// Operations on SIMD vectors of constant pointers.
pub trait SimdConstPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of lanes.
    type Usize;

    /// Vector of `isize` with the same number of lanes.
    type Isize;

    /// Vector of mutable pointers to the same type.
    type MutPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each lane that is null.
    fn is_null(self) -> Self::Mask;

    /// Changes constness without changing the type.
    ///
    /// Equivalent to calling [`pointer::cast_mut`] on each lane.
    fn cast_mut(self) -> Self::MutPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// This method semantically discards *provenance* and
    /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
    ///
    /// Equivalent to calling [`pointer::addr`] on each lane.
    fn addr(self) -> Self::Usize;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each lane.
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
    /// in [`Self::from_exposed_addr`].
    fn expose_addr(self) -> Self::Usize;

    /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each lane.
    fn from_exposed_addr(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}

impl<T, const LANES: usize> Sealed for Simd<*const T, LANES> where
    LaneCount<LANES>: SupportedLaneCount
{
}

impl<T, const LANES: usize> SimdConstPtr for Simd<*const T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    type Usize = Simd<usize, LANES>;
    type Isize = Simd<isize, LANES>;
    type MutPtr = Simd<*mut T, LANES>;
    type Mask = Mask<isize, LANES>;

    #[inline]
    fn is_null(self) -> Self::Mask {
        Simd::splat(core::ptr::null()).simd_eq(self)
    }

    #[inline]
    fn cast_mut(self) -> Self::MutPtr {
        self.cast_ptr()
    }

    #[inline]
    fn addr(self) -> Self::Usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { core::mem::transmute_copy(&self) }
    }

    #[inline]
    fn with_addr(self, addr: Self::Usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the mean-time, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        self.cast_ptr::<*const u8>()
            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
            .cast_ptr()
    }

    #[inline]
    fn expose_addr(self) -> Self::Usize {
        // Safety: `self` is a pointer vector
        unsafe { intrinsics::simd_expose_addr(self) }
    }

    #[inline]
    fn from_exposed_addr(addr: Self::Usize) -> Self {
        // Safety: `self` is a pointer vector
        unsafe { intrinsics::simd_from_exposed_addr(addr) }
    }

    #[inline]
    fn wrapping_offset(self, count: Self::Isize) -> Self {
        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
        unsafe { intrinsics::simd_arith_offset(self, count) }
    }

    #[inline]
    fn wrapping_add(self, count: Self::Usize) -> Self {
        self.wrapping_offset(count.cast())
    }

    #[inline]
    fn wrapping_sub(self, count: Self::Usize) -> Self {
        self.wrapping_offset(-count.cast::<isize>())
    }
}
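A hedged usage sketch of these pointer-vector operations (trait name as exported by this crate; exact nightly feature gating may vary):

```rust
#![feature(portable_simd)]
use std::simd::{Simd, SimdConstPtr};

fn main() {
    let xs = [10i32, 20, 30, 40];
    // Splat the base pointer, then offset each lane by its index.
    let ptrs = Simd::splat(xs.as_ptr()).wrapping_add(Simd::from_array([0usize, 1, 2, 3]));
    assert!(!ptrs.is_null().any());
    // addr() strips provenance, leaving just the lane-wise addresses.
    let addrs = ptrs.addr();
    assert_eq!(addrs[1] - addrs[0], core::mem::size_of::<i32>());
}
```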
library/portable-simd/crates/core_simd/src/elements/mut_ptr.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
use super::sealed::Sealed;
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};

/// Operations on SIMD vectors of mutable pointers.
pub trait SimdMutPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of lanes.
    type Usize;

    /// Vector of `isize` with the same number of lanes.
    type Isize;

    /// Vector of constant pointers to the same type.
    type ConstPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each lane that is null.
    fn is_null(self) -> Self::Mask;

    /// Changes constness without changing the type.
    ///
    /// Equivalent to calling [`pointer::cast_const`] on each lane.
    fn cast_const(self) -> Self::ConstPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// Equivalent to calling [`pointer::addr`] on each lane.
    fn addr(self) -> Self::Usize;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each lane.
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
    /// in [`Self::from_exposed_addr`].
    fn expose_addr(self) -> Self::Usize;

    /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each lane.
    fn from_exposed_addr(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}

impl<T, const LANES: usize> Sealed for Simd<*mut T, LANES> where LaneCount<LANES>: SupportedLaneCount
{}

impl<T, const LANES: usize> SimdMutPtr for Simd<*mut T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    type Usize = Simd<usize, LANES>;
    type Isize = Simd<isize, LANES>;
    type ConstPtr = Simd<*const T, LANES>;
    type Mask = Mask<isize, LANES>;

    #[inline]
    fn is_null(self) -> Self::Mask {
        Simd::splat(core::ptr::null_mut()).simd_eq(self)
    }

    #[inline]
    fn cast_const(self) -> Self::ConstPtr {
        self.cast_ptr()
    }

    #[inline]
    fn addr(self) -> Self::Usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { core::mem::transmute_copy(&self) }
    }

    #[inline]
    fn with_addr(self, addr: Self::Usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the mean-time, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        self.cast_ptr::<*mut u8>()
            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
            .cast_ptr()
    }

    #[inline]
    fn expose_addr(self) -> Self::Usize {
        // Safety: `self` is a pointer vector
        unsafe { intrinsics::simd_expose_addr(self) }
    }

    #[inline]
    fn from_exposed_addr(addr: Self::Usize) -> Self {
        // Safety: `self` is a pointer vector
        unsafe { intrinsics::simd_from_exposed_addr(addr) }
    }

    #[inline]
    fn wrapping_offset(self, count: Self::Isize) -> Self {
        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
        unsafe { intrinsics::simd_arith_offset(self, count) }
    }

    #[inline]
    fn wrapping_add(self, count: Self::Usize) -> Self {
        self.wrapping_offset(count.cast())
    }

    #[inline]
    fn wrapping_sub(self, count: Self::Usize) -> Self {
        self.wrapping_offset(-count.cast::<isize>())
    }
}
@@ -1,4 +1,6 @@
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdElement, SupportedLaneCount};
use crate::simd::{
    intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdElement, SimdMutPtr, SupportedLaneCount,
};

/// Parallel `PartialEq`.
pub trait SimdPartialEq {
@@ -71,3 +73,37 @@ fn simd_ne(self, other: Self) -> Self::Mask {
}

impl_mask! { i8, i16, i32, i64, isize }

impl<T, const LANES: usize> SimdPartialEq for Simd<*const T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    type Mask = Mask<isize, LANES>;

    #[inline]
    fn simd_eq(self, other: Self) -> Self::Mask {
        self.addr().simd_eq(other.addr())
    }

    #[inline]
    fn simd_ne(self, other: Self) -> Self::Mask {
        self.addr().simd_ne(other.addr())
    }
}

impl<T, const LANES: usize> SimdPartialEq for Simd<*mut T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    type Mask = Mask<isize, LANES>;

    #[inline]
    fn simd_eq(self, other: Self) -> Self::Mask {
        self.addr().simd_eq(other.addr())
    }

    #[inline]
    fn simd_ne(self, other: Self) -> Self::Mask {
        self.addr().simd_ne(other.addr())
    }
}

@@ -1,39 +1,21 @@
use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::fmt;

macro_rules! impl_fmt_trait {
    { $($trait:ident,)* } => {
        $(
            impl<T, const LANES: usize> fmt::$trait for Simd<T, LANES>
            where
                LaneCount<LANES>: SupportedLaneCount,
                T: SimdElement + fmt::$trait,
            {
                fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                    #[repr(transparent)]
                    struct Wrapper<'a, T: fmt::$trait>(&'a T);

                    impl<T: fmt::$trait> fmt::Debug for Wrapper<'_, T> {
                        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                            self.0.fmt(f)
                        }
                    }

                    f.debug_list()
                        .entries(self.as_array().iter().map(|x| Wrapper(x)))
                        .finish()
                }
            }
        )*
impl<T, const LANES: usize> fmt::Debug for Simd<T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
    T: SimdElement + fmt::Debug,
{
    /// A `Simd<T, N>` has a debug format like the one for `[T]`:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd::Simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd::Simd;
    /// let floats = Simd::<f32, 4>::splat(-1.0);
    /// assert_eq!(format!("{:?}", [-1.0; 4]), format!("{:?}", floats));
    /// ```
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        <[T] as fmt::Debug>::fmt(self.as_array(), f)
    }
}

impl_fmt_trait! {
    Debug,
    Binary,
    LowerExp,
    UpperExp,
    Octal,
    LowerHex,
    UpperHex,
}

@@ -61,9 +61,6 @@
    /// xor
    pub(crate) fn simd_xor<T>(x: T, y: T) -> T;

    /// getelementptr (without inbounds)
    pub(crate) fn simd_arith_offset<T, U>(ptrs: T, offsets: U) -> T;

    /// fptoui/fptosi/uitofp/sitofp
    /// casting floats to integers is truncating, so it is safe to convert values like e.g. 1.5
    /// but the truncated value must fit in the target type or the result is poison.
@@ -150,4 +147,17 @@
    pub(crate) fn simd_select<M, T>(m: M, yes: T, no: T) -> T;
    #[allow(unused)]
    pub(crate) fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;

    /// getelementptr (without inbounds)
    /// equivalent to wrapping_offset
    pub(crate) fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;

    /// equivalent to `T as U` semantics, specifically for pointers
    pub(crate) fn simd_cast_ptr<T, U>(ptr: T) -> U;

    /// expose a pointer as an address
    pub(crate) fn simd_expose_addr<T, U>(ptr: T) -> U;

    /// convert an exposed address back to a pointer
    pub(crate) fn simd_from_exposed_addr<T, U>(addr: T) -> U;
}

@@ -23,24 +23,20 @@ pub trait SupportedLaneCount: Sealed {

impl<const LANES: usize> Sealed for LaneCount<LANES> {}

impl SupportedLaneCount for LaneCount<1> {
    type BitMask = [u8; 1];
}
impl SupportedLaneCount for LaneCount<2> {
    type BitMask = [u8; 1];
}
impl SupportedLaneCount for LaneCount<4> {
    type BitMask = [u8; 1];
}
impl SupportedLaneCount for LaneCount<8> {
    type BitMask = [u8; 1];
}
impl SupportedLaneCount for LaneCount<16> {
    type BitMask = [u8; 2];
}
impl SupportedLaneCount for LaneCount<32> {
    type BitMask = [u8; 4];
}
impl SupportedLaneCount for LaneCount<64> {
    type BitMask = [u8; 8];
macro_rules! supported_lane_count {
    ($($lanes:literal),+) => {
        $(
            impl SupportedLaneCount for LaneCount<$lanes> {
                type BitMask = [u8; ($lanes + 7) / 8];
            }
        )+
    };
}

supported_lane_count!(1, 2, 4, 8, 16, 32, 64);
#[cfg(feature = "all_lane_counts")]
supported_lane_count!(
    3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
    31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
    56, 57, 58, 59, 60, 61, 62, 63
);

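The `($lanes + 7) / 8` in the macro is simply one bit per lane rounded up to whole bytes, matching the hand-written impls it replaces; a stand-alone restatement (plain Rust, not part of the diff):

```rust
// One bit per lane, rounded up to whole bytes:
// 1..=8 lanes -> 1 byte, 16 lanes -> 2 bytes, 64 lanes -> 8 bytes.
const fn bitmask_bytes(lanes: usize) -> usize {
    (lanes + 7) / 8
}

fn main() {
    assert_eq!(bitmask_bytes(4), 1);
    assert_eq!(bitmask_bytes(16), 2);
    assert_eq!(bitmask_bytes(64), 8);
}
```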
@@ -1,5 +1,8 @@
#![no_std]
#![feature(
    const_refs_to_cell,
    const_maybe_uninit_as_mut_ptr,
    const_mut_refs,
    convert_float_to_int,
    decl_macro,
    intra_doc_pointers,
@@ -7,7 +10,9 @@
    repr_simd,
    simd_ffi,
    staged_api,
    stdsimd
    stdsimd,
    strict_provenance,
    ptr_metadata
)]
#![cfg_attr(feature = "generic_const_exprs", feature(generic_const_exprs))]
#![cfg_attr(feature = "generic_const_exprs", allow(incomplete_features))]
@@ -19,4 +24,3 @@
#[path = "mod.rs"]
mod core_simd;
pub use self::core_simd::simd;
pub use simd::*;

@@ -55,6 +55,7 @@ pub unsafe trait MaskElement: SimdElement + Sealed {}
macro_rules! impl_element {
    { $ty:ty } => {
        impl Sealed for $ty {
            #[inline]
            fn valid<const LANES: usize>(value: Simd<Self, LANES>) -> bool
            where
                LaneCount<LANES>: SupportedLaneCount,
@@ -62,6 +63,7 @@ fn valid<const LANES: usize>(value: Simd<Self, LANES>) -> bool
                (value.simd_eq(Simd::splat(0 as _)) | value.simd_eq(Simd::splat(-1 as _))).all()
            }

            #[inline]
            fn eq(self, other: Self) -> bool { self == other }

            const TRUE: Self = -1;
@@ -83,7 +85,9 @@ unsafe impl MaskElement for $ty {}
///
/// Masks represent boolean inclusion/exclusion on a per-lane basis.
///
/// The layout of this type is unspecified.
/// The layout of this type is unspecified, and may change between platforms
/// and/or Rust versions, and code should not assume that it is equivalent to
/// `[T; LANES]`.
#[repr(transparent)]
pub struct Mask<T, const LANES: usize>(mask_impl::Mask<T, LANES>)
where
@@ -102,6 +106,7 @@ impl<T, const LANES: usize> Clone for Mask<T, LANES>
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
@@ -113,11 +118,13 @@ impl<T, const LANES: usize> Mask<T, LANES>
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Construct a mask by setting all lanes to the given value.
    #[inline]
    pub fn splat(value: bool) -> Self {
        Self(mask_impl::Mask::splat(value))
    }

    /// Converts an array of bools to a SIMD mask.
    #[inline]
    pub fn from_array(array: [bool; LANES]) -> Self {
        // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
        //     true: 0b_0000_0001
@@ -134,6 +141,7 @@ pub fn splat(value: bool) -> Self {
    }

    /// Converts a SIMD mask to an array of bools.
    #[inline]
    pub fn to_array(self) -> [bool; LANES] {
        // This follows mostly the same logic as from_array.
        // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
@@ -261,6 +269,7 @@ pub fn all(self) -> bool {
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn from(array: [bool; LANES]) -> Self {
        Self::from_array(array)
    }
@@ -271,6 +280,7 @@ pub fn all(self) -> bool {
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn from(vector: Mask<T, LANES>) -> Self {
        vector.to_array()
    }
@@ -520,60 +530,6 @@ fn bitxor_assign(&mut self, rhs: bool) {
    }
}

/// A mask for SIMD vectors with eight elements of 8 bits.
pub type mask8x8 = Mask<i8, 8>;

/// A mask for SIMD vectors with 16 elements of 8 bits.
pub type mask8x16 = Mask<i8, 16>;

/// A mask for SIMD vectors with 32 elements of 8 bits.
pub type mask8x32 = Mask<i8, 32>;

/// A mask for SIMD vectors with 64 elements of 8 bits.
pub type mask8x64 = Mask<i8, 64>;

/// A mask for SIMD vectors with four elements of 16 bits.
pub type mask16x4 = Mask<i16, 4>;

/// A mask for SIMD vectors with eight elements of 16 bits.
pub type mask16x8 = Mask<i16, 8>;

/// A mask for SIMD vectors with 16 elements of 16 bits.
pub type mask16x16 = Mask<i16, 16>;

/// A mask for SIMD vectors with 32 elements of 16 bits.
pub type mask16x32 = Mask<i16, 32>;

/// A mask for SIMD vectors with two elements of 32 bits.
pub type mask32x2 = Mask<i32, 2>;

/// A mask for SIMD vectors with four elements of 32 bits.
pub type mask32x4 = Mask<i32, 4>;

/// A mask for SIMD vectors with eight elements of 32 bits.
pub type mask32x8 = Mask<i32, 8>;

/// A mask for SIMD vectors with 16 elements of 32 bits.
pub type mask32x16 = Mask<i32, 16>;

/// A mask for SIMD vectors with two elements of 64 bits.
pub type mask64x2 = Mask<i64, 2>;

/// A mask for SIMD vectors with four elements of 64 bits.
pub type mask64x4 = Mask<i64, 4>;

/// A mask for SIMD vectors with eight elements of 64 bits.
pub type mask64x8 = Mask<i64, 8>;

/// A mask for SIMD vectors with two elements of pointer width.
pub type masksizex2 = Mask<isize, 2>;

/// A mask for SIMD vectors with four elements of pointer width.
pub type masksizex4 = Mask<isize, 4>;

/// A mask for SIMD vectors with eight elements of pointer width.
pub type masksizex8 = Mask<isize, 8>;

macro_rules! impl_from {
    { $from:ty => $($to:ty),* } => {
        $(
@@ -581,6 +537,7 @@ impl<const LANES: usize> From<Mask<$from, LANES>> for Mask<$to, LANES>
            where
                LaneCount<LANES>: SupportedLaneCount,
            {
                #[inline]
                fn from(value: Mask<$from, LANES>) -> Self {
                    value.cast()
                }

@@ -26,6 +26,7 @@ impl<T, const LANES: usize> Clone for Mask<T, LANES>
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
@@ -36,6 +37,7 @@ impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0.as_ref() == other.0.as_ref()
    }
@@ -46,6 +48,7 @@ impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.0.as_ref().partial_cmp(other.0.as_ref())
    }
@@ -63,6 +66,7 @@ impl<T, const LANES: usize> Ord for Mask<T, LANES>
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.0.as_ref().cmp(other.0.as_ref())
    }

@@ -37,6 +37,7 @@ impl<T, const LANES: usize> PartialEq for Mask<T, LANES>
    T: MaskElement + PartialEq,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
@@ -47,6 +48,7 @@ impl<T, const LANES: usize> PartialOrd for Mask<T, LANES>
    T: MaskElement + PartialOrd,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.0.partial_cmp(&other.0)
    }
@@ -64,6 +66,7 @@ impl<T, const LANES: usize> Ord for Mask<T, LANES>
    T: MaskElement + Ord,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.0.cmp(&other.0)
    }
@@ -262,6 +265,7 @@ impl<T, const LANES: usize> From<Mask<T, LANES>> for Simd<T, LANES>
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn from(value: Mask<T, LANES>) -> Self {
        value.0
    }

@@ -48,10 +48,12 @@ macro_rules! impl_integer_intrinsic {
impl<T: MaskElement> ToBitMask for Mask<T, $lanes> {
    type BitMask = $int;

    #[inline]
    fn to_bitmask(self) -> $int {
        self.0.to_bitmask_integer()
    }

    #[inline]
    fn from_bitmask(bitmask: $int) -> Self {
        Self(mask_impl::Mask::from_bitmask_integer(bitmask))
    }
@@ -83,10 +85,12 @@ impl<T: MaskElement, const LANES: usize> ToBitMaskArray for Mask<T, LANES>
{
    const BYTES: usize = bitmask_len(LANES);

    #[inline]
    fn to_bitmask_array(self) -> [u8; Self::BYTES] {
        self.0.to_bitmask_array()
    }

    #[inline]
    fn from_bitmask_array(bitmask: [u8; Self::BYTES]) -> Self {
        Mask(mask_impl::Mask::from_bitmask_array(bitmask))
    }

@@ -6,6 +6,8 @@
#[cfg(feature = "generic_const_exprs")]
mod to_bytes;

mod alias;
mod cast;
mod elements;
mod eq;
mod fmt;
@@ -15,6 +17,7 @@
mod ops;
mod ord;
mod select;
mod swizzle_dyn;
mod vector;
mod vendor;

@@ -22,11 +25,14 @@
pub mod simd {
    pub(crate) use crate::core_simd::intrinsics;

    pub use crate::core_simd::alias::*;
    pub use crate::core_simd::cast::*;
    pub use crate::core_simd::elements::*;
    pub use crate::core_simd::eq::*;
    pub use crate::core_simd::lane_count::{LaneCount, SupportedLaneCount};
    pub use crate::core_simd::masks::*;
    pub use crate::core_simd::ord::*;
    pub use crate::core_simd::swizzle::*;
    pub use crate::core_simd::swizzle_dyn::*;
    pub use crate::core_simd::vector::*;
}

@@ -1,4 +1,6 @@
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
use crate::simd::{
    intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdMutPtr, SimdPartialEq, SupportedLaneCount,
};

/// Parallel `PartialOrd`.
pub trait SimdPartialOrd: SimdPartialEq {
@@ -211,3 +213,101 @@ fn simd_clamp(self, min: Self, max: Self) -> Self {
}

impl_mask! { i8, i16, i32, i64, isize }

impl<T, const LANES: usize> SimdPartialOrd for Simd<*const T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn simd_lt(self, other: Self) -> Self::Mask {
        self.addr().simd_lt(other.addr())
    }

    #[inline]
    fn simd_le(self, other: Self) -> Self::Mask {
        self.addr().simd_le(other.addr())
    }

    #[inline]
    fn simd_gt(self, other: Self) -> Self::Mask {
        self.addr().simd_gt(other.addr())
    }

    #[inline]
    fn simd_ge(self, other: Self) -> Self::Mask {
        self.addr().simd_ge(other.addr())
    }
}

impl<T, const LANES: usize> SimdOrd for Simd<*const T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn simd_max(self, other: Self) -> Self {
        self.simd_lt(other).select(other, self)
    }

    #[inline]
    fn simd_min(self, other: Self) -> Self {
        self.simd_gt(other).select(other, self)
    }

    #[inline]
    fn simd_clamp(self, min: Self, max: Self) -> Self {
        assert!(
            min.simd_le(max).all(),
            "each lane in `min` must be less than or equal to the corresponding lane in `max`",
        );
        self.simd_max(min).simd_min(max)
    }
}

impl<T, const LANES: usize> SimdPartialOrd for Simd<*mut T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn simd_lt(self, other: Self) -> Self::Mask {
        self.addr().simd_lt(other.addr())
    }

    #[inline]
    fn simd_le(self, other: Self) -> Self::Mask {
        self.addr().simd_le(other.addr())
    }

    #[inline]
    fn simd_gt(self, other: Self) -> Self::Mask {
        self.addr().simd_gt(other.addr())
    }

    #[inline]
    fn simd_ge(self, other: Self) -> Self::Mask {
        self.addr().simd_ge(other.addr())
    }
}

impl<T, const LANES: usize> SimdOrd for Simd<*mut T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    #[inline]
    fn simd_max(self, other: Self) -> Self {
        self.simd_lt(other).select(other, self)
    }

    #[inline]
    fn simd_min(self, other: Self) -> Self {
        self.simd_gt(other).select(other, self)
    }

    #[inline]
    fn simd_clamp(self, min: Self, max: Self) -> Self {
        assert!(
            min.simd_le(max).all(),
            "each lane in `min` must be less than or equal to the corresponding lane in `max`",
        );
        self.simd_max(min).simd_min(max)
    }
}

@@ -265,16 +265,13 @@ impl<const OFFSET: usize, const LANES: usize> Swizzle<LANES, LANES> for Rotate<OFFSET>

    /// Interleave two vectors.
    ///
    /// Produces two vectors with lanes taken alternately from `self` and `other`.
    /// The resulting vectors contain lanes taken alternately from `self` and `other`, first
    /// filling the first result, and then the second.
    ///
    /// The first result contains the first `LANES / 2` lanes from `self` and `other`,
    /// alternating, starting with the first lane of `self`.
    ///
    /// The second result contains the last `LANES / 2` lanes from `self` and `other`,
    /// alternating, starting with the lane `LANES / 2` from the start of `self`.
    /// The reverse of this operation is [`Simd::deinterleave`].
    ///
    /// ```
    /// #![feature(portable_simd)]
    /// # #![feature(portable_simd)]
    /// # use core::simd::Simd;
    /// let a = Simd::from_array([0, 1, 2, 3]);
    /// let b = Simd::from_array([4, 5, 6, 7]);
@@ -285,29 +282,17 @@ impl<const OFFSET: usize, const LANES: usize> Swizzle<LANES, LANES> for Rotate<OFFSET>
    #[inline]
    #[must_use = "method returns a new vector and does not mutate the original inputs"]
    pub fn interleave(self, other: Self) -> (Self, Self) {
        const fn lo<const LANES: usize>() -> [Which; LANES] {
        const fn interleave<const LANES: usize>(high: bool) -> [Which; LANES] {
            let mut idx = [Which::First(0); LANES];
            let mut i = 0;
            while i < LANES {
                let offset = i / 2;
                idx[i] = if i % 2 == 0 {
                    Which::First(offset)
                // Treat the source as a concatenated vector
                let dst_index = if high { i + LANES } else { i };
                let src_index = dst_index / 2 + (dst_index % 2) * LANES;
                idx[i] = if src_index < LANES {
                    Which::First(src_index)
                } else {
                    Which::Second(offset)
                };
                i += 1;
            }
            idx
        }
        const fn hi<const LANES: usize>() -> [Which; LANES] {
            let mut idx = [Which::First(0); LANES];
            let mut i = 0;
            while i < LANES {
                let offset = (LANES + i) / 2;
                idx[i] = if i % 2 == 0 {
                    Which::First(offset)
                } else {
                    Which::Second(offset)
                    Which::Second(src_index % LANES)
                };
                i += 1;
            }
@@ -318,11 +303,11 @@ pub fn interleave(self, other: Self) -> (Self, Self) {
        struct Hi;

        impl<const LANES: usize> Swizzle2<LANES, LANES> for Lo {
            const INDEX: [Which; LANES] = lo::<LANES>();
            const INDEX: [Which; LANES] = interleave::<LANES>(false);
        }

        impl<const LANES: usize> Swizzle2<LANES, LANES> for Hi {
            const INDEX: [Which; LANES] = hi::<LANES>();
            const INDEX: [Which; LANES] = interleave::<LANES>(true);
        }

        (Lo::swizzle2(self, other), Hi::swizzle2(self, other))
@@ -336,8 +321,10 @@ impl<const LANES: usize> Swizzle2<LANES, LANES> for Hi {
    /// The second result takes every other lane of `self` and then `other`, starting with
    /// the second lane.
    ///
    /// The reverse of this operation is [`Simd::interleave`].
    ///
    /// ```
    /// #![feature(portable_simd)]
    /// # #![feature(portable_simd)]
    /// # use core::simd::Simd;
    /// let a = Simd::from_array([0, 4, 1, 5]);
    /// let b = Simd::from_array([2, 6, 3, 7]);
@@ -348,22 +335,17 @@ impl<const LANES: usize> Swizzle2<LANES, LANES> for Hi {
    #[inline]
    #[must_use = "method returns a new vector and does not mutate the original inputs"]
    pub fn deinterleave(self, other: Self) -> (Self, Self) {
        const fn even<const LANES: usize>() -> [Which; LANES] {
        const fn deinterleave<const LANES: usize>(second: bool) -> [Which; LANES] {
            let mut idx = [Which::First(0); LANES];
            let mut i = 0;
            while i < LANES / 2 {
                idx[i] = Which::First(2 * i);
                idx[i + LANES / 2] = Which::Second(2 * i);
                i += 1;
            }
            idx
        }
        const fn odd<const LANES: usize>() -> [Which; LANES] {
            let mut idx = [Which::First(0); LANES];
            let mut i = 0;
            while i < LANES / 2 {
                idx[i] = Which::First(2 * i + 1);
                idx[i + LANES / 2] = Which::Second(2 * i + 1);
            while i < LANES {
                // Treat the source as a concatenated vector
                let src_index = i * 2 + second as usize;
                idx[i] = if src_index < LANES {
                    Which::First(src_index)
                } else {
                    Which::Second(src_index % LANES)
                };
                i += 1;
            }
            idx
@@ -373,11 +355,11 @@ pub fn deinterleave(self, other: Self) -> (Self, Self) {
        struct Odd;

        impl<const LANES: usize> Swizzle2<LANES, LANES> for Even {
            const INDEX: [Which; LANES] = even::<LANES>();
            const INDEX: [Which; LANES] = deinterleave::<LANES>(false);
        }

        impl<const LANES: usize> Swizzle2<LANES, LANES> for Odd {
            const INDEX: [Which; LANES] = odd::<LANES>();
            const INDEX: [Which; LANES] = deinterleave::<LANES>(true);
        }

        (Even::swizzle2(self, other), Odd::swizzle2(self, other))

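A small sketch (nightly, matching the doctests above) showing that `deinterleave` undoes `interleave`:

```rust
#![feature(portable_simd)]
use std::simd::Simd;

fn main() {
    let a = Simd::from_array([0, 1, 2, 3]);
    let b = Simd::from_array([4, 5, 6, 7]);
    // Lanes are taken alternately from `a` and `b`...
    let (x, y) = a.interleave(b);
    assert_eq!(x.to_array(), [0, 4, 1, 5]);
    assert_eq!(y.to_array(), [2, 6, 3, 7]);
    // ...and deinterleave reverses the transformation.
    let (c, d) = x.deinterleave(y);
    assert_eq!(c.to_array(), [0, 1, 2, 3]);
    assert_eq!(d.to_array(), [4, 5, 6, 7]);
}
```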
library/portable-simd/crates/core_simd/src/swizzle_dyn.rs (new file, 157 lines)
@@ -0,0 +1,157 @@
use crate::simd::{LaneCount, Simd, SupportedLaneCount};
use core::mem;

impl<const N: usize> Simd<u8, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    /// Swizzle a vector of bytes according to the index vector.
    /// Indices within range select the appropriate byte.
    /// Indices "out of bounds" instead select 0.
    ///
    /// Note that the current implementation is selected during build-time
    /// of the standard library, so `cargo build -Zbuild-std` may be necessary
    /// to unlock better performance, especially for larger vectors.
    /// A planned compiler improvement will enable using `#[target_feature]` instead.
    #[inline]
    pub fn swizzle_dyn(self, idxs: Simd<u8, N>) -> Self {
        #![allow(unused_imports, unused_unsafe)]
        #[cfg(target_arch = "aarch64")]
        use core::arch::aarch64::{uint8x8_t, vqtbl1q_u8, vtbl1_u8};
        #[cfg(all(target_arch = "arm", target_feature = "v7"))]
        use core::arch::arm::{uint8x8_t, vtbl1_u8};
        #[cfg(target_arch = "wasm32")]
        use core::arch::wasm32 as wasm;
        #[cfg(target_arch = "x86")]
        use core::arch::x86;
        #[cfg(target_arch = "x86_64")]
        use core::arch::x86_64 as x86;
        // SAFETY: Intrinsics covered by cfg
        unsafe {
            match N {
                #[cfg(target_feature = "neon")]
                8 => transize(vtbl1_u8, self, idxs),
                #[cfg(target_feature = "ssse3")]
                16 => transize(x86::_mm_shuffle_epi8, self, idxs),
                #[cfg(target_feature = "simd128")]
                16 => transize(wasm::i8x16_swizzle, self, idxs),
                #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
                16 => transize(vqtbl1q_u8, self, idxs),
                #[cfg(all(target_feature = "avx2", not(target_feature = "avx512vbmi")))]
                32 => transize_raw(avx2_pshufb, self, idxs),
                #[cfg(all(target_feature = "avx512vl", target_feature = "avx512vbmi"))]
                32 => transize(x86::_mm256_permutexvar_epi8, self, idxs),
                // Notable absence: avx512bw shuffle
                // If avx512bw is available, odds of avx512vbmi are good
                // FIXME: initial AVX512VBMI variant didn't actually pass muster
                // #[cfg(target_feature = "avx512vbmi")]
                // 64 => transize(x86::_mm512_permutexvar_epi8, self, idxs),
                _ => {
                    let mut array = [0; N];
                    for (i, k) in idxs.to_array().into_iter().enumerate() {
                        if (k as usize) < N {
                            array[i] = self[k as usize];
                        };
                    }
                    array.into()
                }
            }
        }
    }
}
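The scalar fallback above is what defines the observable behavior on every target: in-range indices select bytes, out-of-range indices produce 0. A quick usage sketch (assuming a nightly toolchain where `swizzle_dyn` is available):

```rust
#![feature(portable_simd)]
use std::simd::u8x16;

fn main() {
    let bytes = u8x16::from_array(*b"abcdefghijklmnop");
    let mut idxs = [0u8; 16];
    idxs[0] = 15;  // in range: selects b'p'
    idxs[1] = 255; // out of range: selects 0
    let idx_v = u8x16::from_array(idxs);
    let shuffled = bytes.swizzle_dyn(idx_v);
    assert_eq!(shuffled.to_array()[0], b'p');
    assert_eq!(shuffled.to_array()[1], 0);
}
```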

/// "vpshufb like it was meant to be" on AVX2
///
/// # Safety
/// This requires AVX2 to work
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[target_feature(enable = "avx2")]
#[allow(unused)]
#[inline]
#[allow(clippy::let_and_return)]
unsafe fn avx2_pshufb(bytes: Simd<u8, 32>, idxs: Simd<u8, 32>) -> Simd<u8, 32> {
    use crate::simd::SimdPartialOrd;
    #[cfg(target_arch = "x86")]
    use core::arch::x86;
    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64 as x86;
    use x86::_mm256_permute2x128_si256 as avx2_cross_shuffle;
    use x86::_mm256_shuffle_epi8 as avx2_half_pshufb;
    let mid = Simd::splat(16u8);
    let high = mid + mid;
    // SAFETY: Caller promised AVX2
    unsafe {
        // This is ordering sensitive, and LLVM will order these how you put them.
        // Most AVX2 impls use ~5 "ports", and only 1 or 2 are capable of permutes.
        // But the "compose" step will lower to ops that can also use at least 1 other port.
        // So this tries to break up permutes so composition flows through "open" ports.
        // Comparative benches should be done on multiple AVX2 CPUs before reordering this.

        let hihi = avx2_cross_shuffle::<0x11>(bytes.into(), bytes.into());
        let hi_shuf = Simd::from(avx2_half_pshufb(
            hihi,        // duplicate the vector's top half
            idxs.into(), // so that using only 4 bits of an index still picks bytes 16-31
        ));
        // A zero-fill during the compose step gives the "all-Neon-like" OOB-is-0 semantics
        let compose = idxs.simd_lt(high).select(hi_shuf, Simd::splat(0));
        let lolo = avx2_cross_shuffle::<0x00>(bytes.into(), bytes.into());
        let lo_shuf = Simd::from(avx2_half_pshufb(lolo, idxs.into()));
        // Repeat, then pick indices < 16, overwriting indices 0-15 from previous compose step
        let compose = idxs.simd_lt(mid).select(lo_shuf, compose);
        compose
    }
}
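The half-duplication above exists because AVX2's `vpshufb` shuffles each 128-bit half independently and zeroes any lane whose index has its top bit set. A scalar model of that per-half behavior (an illustration, not code from the patch) makes the two `avx2_cross_shuffle` calls easier to follow:

```rust
// Scalar model of per-128-bit-half vpshufb: each output half can only read
// its own 16 source bytes, and a set top bit in the index yields 0.
fn pshufb_halves(bytes: [u8; 32], idxs: [u8; 32]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for half in 0..2 {
        for i in 0..16 {
            let k = idxs[half * 16 + i];
            if k & 0x80 == 0 {
                // only the low 4 bits participate, and only within this half
                out[half * 16 + i] = bytes[half * 16 + (k & 0x0f) as usize];
            }
        }
    }
    out
}
```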

/// This sets up a call to an architecture-specific function, and in doing so
/// it persuades rustc that everything is the correct size. Which it is.
/// This would not be needed if one could convince Rust that, by matching on N,
/// N is that value, and thus it would be valid to substitute e.g. 16.
///
/// # Safety
/// The correctness of this function hinges on the sizes agreeing in actuality.
#[allow(dead_code)]
#[inline(always)]
unsafe fn transize<T, const N: usize>(
    f: unsafe fn(T, T) -> T,
    bytes: Simd<u8, N>,
    idxs: Simd<u8, N>,
) -> Simd<u8, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    let idxs = zeroing_idxs(idxs);
    // SAFETY: Same obligation to use this function as to use mem::transmute_copy.
    unsafe { mem::transmute_copy(&f(mem::transmute_copy(&bytes), mem::transmute_copy(&idxs))) }
}

/// Make indices that yield 0 for this architecture
#[inline(always)]
fn zeroing_idxs<const N: usize>(idxs: Simd<u8, N>) -> Simd<u8, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    // On x86, make sure the top bit is set.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    let idxs = {
        use crate::simd::SimdPartialOrd;
        idxs.simd_lt(Simd::splat(N as u8))
            .select(idxs, Simd::splat(u8::MAX))
    };
    // Simply do nothing on most architectures.
    idxs
}

/// As transize but no implicit call to `zeroing_idxs`.
#[allow(dead_code)]
#[inline(always)]
unsafe fn transize_raw<T, const N: usize>(
    f: unsafe fn(T, T) -> T,
    bytes: Simd<u8, N>,
    idxs: Simd<u8, N>,
) -> Simd<u8, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    // SAFETY: Same obligation to use this function as to use mem::transmute_copy.
    unsafe { mem::transmute_copy(&f(mem::transmute_copy(&bytes), mem::transmute_copy(&idxs))) }
}
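`transize` and `transize_raw` lean on `mem::transmute_copy` because `mem::transmute` cannot relate a const-generic `Simd<u8, N>` to a concrete intrinsic type. A standalone sketch of the same size-punning idea, assuming an x86-64 target built with SSSE3 (the `shuffle16` name is illustrative, not from the patch):

```rust
#[cfg(all(target_arch = "x86_64", target_feature = "ssse3"))]
unsafe fn shuffle16(bytes: [u8; 16], idxs: [u8; 16]) -> [u8; 16] {
    use core::arch::x86_64::{__m128i, _mm_shuffle_epi8};
    use core::mem;
    // SAFETY: [u8; 16] and __m128i have the same size, which is the entire
    // obligation transmute_copy imposes here.
    let b: __m128i = mem::transmute_copy(&bytes);
    let i: __m128i = mem::transmute_copy(&idxs);
    mem::transmute_copy(&_mm_shuffle_epi8(b, i))
}
```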
File diff suppressed because it is too large
@@ -1,24 +0,0 @@
#![allow(non_camel_case_types)]

use crate::simd::Simd;

/// A 64-bit SIMD vector with two elements of type `f32`.
pub type f32x2 = Simd<f32, 2>;

/// A 128-bit SIMD vector with four elements of type `f32`.
pub type f32x4 = Simd<f32, 4>;

/// A 256-bit SIMD vector with eight elements of type `f32`.
pub type f32x8 = Simd<f32, 8>;

/// A 512-bit SIMD vector with 16 elements of type `f32`.
pub type f32x16 = Simd<f32, 16>;

/// A 128-bit SIMD vector with two elements of type `f64`.
pub type f64x2 = Simd<f64, 2>;

/// A 256-bit SIMD vector with four elements of type `f64`.
pub type f64x4 = Simd<f64, 4>;

/// A 512-bit SIMD vector with eight elements of type `f64`.
pub type f64x8 = Simd<f64, 8>;
@@ -1,63 +0,0 @@
#![allow(non_camel_case_types)]

use crate::simd::Simd;

/// A SIMD vector with two elements of type `isize`.
pub type isizex2 = Simd<isize, 2>;

/// A SIMD vector with four elements of type `isize`.
pub type isizex4 = Simd<isize, 4>;

/// A SIMD vector with eight elements of type `isize`.
pub type isizex8 = Simd<isize, 8>;

/// A 32-bit SIMD vector with two elements of type `i16`.
pub type i16x2 = Simd<i16, 2>;

/// A 64-bit SIMD vector with four elements of type `i16`.
pub type i16x4 = Simd<i16, 4>;

/// A 128-bit SIMD vector with eight elements of type `i16`.
pub type i16x8 = Simd<i16, 8>;

/// A 256-bit SIMD vector with 16 elements of type `i16`.
pub type i16x16 = Simd<i16, 16>;

/// A 512-bit SIMD vector with 32 elements of type `i16`.
pub type i16x32 = Simd<i16, 32>;

/// A 64-bit SIMD vector with two elements of type `i32`.
pub type i32x2 = Simd<i32, 2>;

/// A 128-bit SIMD vector with four elements of type `i32`.
pub type i32x4 = Simd<i32, 4>;

/// A 256-bit SIMD vector with eight elements of type `i32`.
pub type i32x8 = Simd<i32, 8>;

/// A 512-bit SIMD vector with 16 elements of type `i32`.
pub type i32x16 = Simd<i32, 16>;

/// A 128-bit SIMD vector with two elements of type `i64`.
pub type i64x2 = Simd<i64, 2>;

/// A 256-bit SIMD vector with four elements of type `i64`.
pub type i64x4 = Simd<i64, 4>;

/// A 512-bit SIMD vector with eight elements of type `i64`.
pub type i64x8 = Simd<i64, 8>;

/// A 32-bit SIMD vector with four elements of type `i8`.
pub type i8x4 = Simd<i8, 4>;

/// A 64-bit SIMD vector with eight elements of type `i8`.
pub type i8x8 = Simd<i8, 8>;

/// A 128-bit SIMD vector with 16 elements of type `i8`.
pub type i8x16 = Simd<i8, 16>;

/// A 256-bit SIMD vector with 32 elements of type `i8`.
pub type i8x32 = Simd<i8, 32>;

/// A 512-bit SIMD vector with 64 elements of type `i8`.
pub type i8x64 = Simd<i8, 64>;
@@ -1,51 +0,0 @@
//! Private implementation details of public gather/scatter APIs.
use crate::simd::intrinsics;
use crate::simd::{LaneCount, Simd, SupportedLaneCount};

/// A vector of *const T.
#[derive(Debug, Copy, Clone)]
#[repr(simd)]
pub(crate) struct SimdConstPtr<T, const LANES: usize>([*const T; LANES]);

impl<T, const LANES: usize> SimdConstPtr<T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
    T: Sized,
{
    #[inline]
    #[must_use]
    pub fn splat(ptr: *const T) -> Self {
        Self([ptr; LANES])
    }

    #[inline]
    #[must_use]
    pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
        // Safety: this intrinsic doesn't have a precondition
        unsafe { intrinsics::simd_arith_offset(self, addend) }
    }
}

/// A vector of *mut T. Be very careful around potential aliasing.
#[derive(Debug, Copy, Clone)]
#[repr(simd)]
pub(crate) struct SimdMutPtr<T, const LANES: usize>([*mut T; LANES]);

impl<T, const LANES: usize> SimdMutPtr<T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
    T: Sized,
{
    #[inline]
    #[must_use]
    pub fn splat(ptr: *mut T) -> Self {
        Self([ptr; LANES])
    }

    #[inline]
    #[must_use]
    pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
        // Safety: this intrinsic doesn't have a precondition
        unsafe { intrinsics::simd_arith_offset(self, addend) }
    }
}
@@ -1,63 +0,0 @@
#![allow(non_camel_case_types)]

use crate::simd::Simd;

/// A SIMD vector with two elements of type `usize`.
pub type usizex2 = Simd<usize, 2>;

/// A SIMD vector with four elements of type `usize`.
pub type usizex4 = Simd<usize, 4>;

/// A SIMD vector with eight elements of type `usize`.
pub type usizex8 = Simd<usize, 8>;

/// A 32-bit SIMD vector with two elements of type `u16`.
pub type u16x2 = Simd<u16, 2>;

/// A 64-bit SIMD vector with four elements of type `u16`.
pub type u16x4 = Simd<u16, 4>;

/// A 128-bit SIMD vector with eight elements of type `u16`.
pub type u16x8 = Simd<u16, 8>;

/// A 256-bit SIMD vector with 16 elements of type `u16`.
pub type u16x16 = Simd<u16, 16>;

/// A 512-bit SIMD vector with 32 elements of type `u16`.
pub type u16x32 = Simd<u16, 32>;

/// A 64-bit SIMD vector with two elements of type `u32`.
pub type u32x2 = Simd<u32, 2>;

/// A 128-bit SIMD vector with four elements of type `u32`.
pub type u32x4 = Simd<u32, 4>;

/// A 256-bit SIMD vector with eight elements of type `u32`.
pub type u32x8 = Simd<u32, 8>;

/// A 512-bit SIMD vector with 16 elements of type `u32`.
pub type u32x16 = Simd<u32, 16>;

/// A 128-bit SIMD vector with two elements of type `u64`.
pub type u64x2 = Simd<u64, 2>;

/// A 256-bit SIMD vector with four elements of type `u64`.
pub type u64x4 = Simd<u64, 4>;

/// A 512-bit SIMD vector with eight elements of type `u64`.
pub type u64x8 = Simd<u64, 8>;

/// A 32-bit SIMD vector with four elements of type `u8`.
pub type u8x4 = Simd<u8, 4>;

/// A 64-bit SIMD vector with eight elements of type `u8`.
pub type u8x8 = Simd<u8, 8>;

/// A 128-bit SIMD vector with 16 elements of type `u8`.
pub type u8x16 = Simd<u8, 16>;

/// A 256-bit SIMD vector with 32 elements of type `u8`.
pub type u8x32 = Simd<u8, 32>;

/// A 512-bit SIMD vector with 64 elements of type `u8`.
pub type u8x64 = Simd<u8, 64>;
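These standalone alias files are removed, but each alias was only ever a plain `type` over `Simd` (and, per the overview above, aliases up to 512 bits remain available in the crate). User code that wants its own name can always spell one directly; a sketch:

```rust
#![feature(portable_simd)]
use std::simd::Simd;

// Equivalent to the removed `u8x16` alias definition.
type U8x16 = Simd<u8, 16>;

fn main() {
    let v = U8x16::splat(7);
    assert_eq!(v.to_array(), [7; 16]);
}
```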
@@ -1,6 +1,6 @@
 // Test that we handle all our "auto-deref" cases correctly.
 #![feature(portable_simd)]
-use core_simd::f32x4;
+use core_simd::simd::f32x4;

 #[cfg(target_arch = "wasm32")]
 use wasm_bindgen_test::*;
@@ -2,7 +2,7 @@ macro_rules! mask_tests {
     { $vector:ident, $lanes:literal } => {
         #[cfg(test)]
         mod $vector {
-            use core_simd::$vector as Vector;
+            use core_simd::simd::$vector as Vector;
             const LANES: usize = $lanes;

             #[cfg(target_arch = "wasm32")]
@@ -13,11 +13,13 @@ mod $type {
             #[cfg(target_arch = "wasm32")]
             use wasm_bindgen_test::*;

+            use core_simd::simd::Mask;
+
             #[test]
             #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
             fn set_and_test() {
                 let values = [true, false, false, true, false, false, true, false];
-                let mut mask = core_simd::Mask::<$type, 8>::splat(false);
+                let mut mask = Mask::<$type, 8>::splat(false);
                 for (lane, value) in values.iter().copied().enumerate() {
                     mask.set(lane, value);
                 }
@@ -29,7 +31,7 @@ fn set_and_test() {
             #[test]
             #[should_panic]
             fn set_invalid_lane() {
-                let mut mask = core_simd::Mask::<$type, 8>::splat(false);
+                let mut mask = Mask::<$type, 8>::splat(false);
                 mask.set(8, true);
                 let _ = mask;
             }
@@ -37,24 +39,24 @@ fn set_invalid_lane() {
             #[test]
             #[should_panic]
             fn test_invalid_lane() {
-                let mask = core_simd::Mask::<$type, 8>::splat(false);
+                let mask = Mask::<$type, 8>::splat(false);
                 let _ = mask.test(8);
             }

             #[test]
             fn any() {
-                assert!(!core_simd::Mask::<$type, 8>::splat(false).any());
-                assert!(core_simd::Mask::<$type, 8>::splat(true).any());
-                let mut v = core_simd::Mask::<$type, 8>::splat(false);
+                assert!(!Mask::<$type, 8>::splat(false).any());
+                assert!(Mask::<$type, 8>::splat(true).any());
+                let mut v = Mask::<$type, 8>::splat(false);
                 v.set(2, true);
                 assert!(v.any());
             }

             #[test]
             fn all() {
-                assert!(!core_simd::Mask::<$type, 8>::splat(false).all());
-                assert!(core_simd::Mask::<$type, 8>::splat(true).all());
-                let mut v = core_simd::Mask::<$type, 8>::splat(false);
+                assert!(!Mask::<$type, 8>::splat(false).all());
+                assert!(Mask::<$type, 8>::splat(true).all());
+                let mut v = Mask::<$type, 8>::splat(false);
                 v.set(2, true);
                 assert!(!v.all());
             }
@@ -62,57 +64,57 @@ fn all() {
             #[test]
             fn roundtrip_int_conversion() {
                 let values = [true, false, false, true, false, false, true, false];
-                let mask = core_simd::Mask::<$type, 8>::from_array(values);
+                let mask = Mask::<$type, 8>::from_array(values);
                 let int = mask.to_int();
                 assert_eq!(int.to_array(), [-1, 0, 0, -1, 0, 0, -1, 0]);
-                assert_eq!(core_simd::Mask::<$type, 8>::from_int(int), mask);
+                assert_eq!(Mask::<$type, 8>::from_int(int), mask);
             }

             #[test]
             fn roundtrip_bitmask_conversion() {
-                use core_simd::ToBitMask;
+                use core_simd::simd::ToBitMask;
                 let values = [
                     true, false, false, true, false, false, true, false,
                     true, true, false, false, false, false, false, true,
                 ];
-                let mask = core_simd::Mask::<$type, 16>::from_array(values);
+                let mask = Mask::<$type, 16>::from_array(values);
                 let bitmask = mask.to_bitmask();
                 assert_eq!(bitmask, 0b1000001101001001);
-                assert_eq!(core_simd::Mask::<$type, 16>::from_bitmask(bitmask), mask);
+                assert_eq!(Mask::<$type, 16>::from_bitmask(bitmask), mask);
             }

             #[test]
             fn roundtrip_bitmask_conversion_short() {
-                use core_simd::ToBitMask;
+                use core_simd::simd::ToBitMask;

                 let values = [
                     false, false, false, true,
                 ];
-                let mask = core_simd::Mask::<$type, 4>::from_array(values);
+                let mask = Mask::<$type, 4>::from_array(values);
                 let bitmask = mask.to_bitmask();
                 assert_eq!(bitmask, 0b1000);
-                assert_eq!(core_simd::Mask::<$type, 4>::from_bitmask(bitmask), mask);
+                assert_eq!(Mask::<$type, 4>::from_bitmask(bitmask), mask);

                 let values = [true, false];
-                let mask = core_simd::Mask::<$type, 2>::from_array(values);
+                let mask = Mask::<$type, 2>::from_array(values);
                 let bitmask = mask.to_bitmask();
                 assert_eq!(bitmask, 0b01);
-                assert_eq!(core_simd::Mask::<$type, 2>::from_bitmask(bitmask), mask);
+                assert_eq!(Mask::<$type, 2>::from_bitmask(bitmask), mask);
             }

             #[test]
             fn cast() {
-                fn cast_impl<T: core_simd::MaskElement>()
+                fn cast_impl<T: core_simd::simd::MaskElement>()
                 where
-                    core_simd::Mask<$type, 8>: Into<core_simd::Mask<T, 8>>,
+                    Mask<$type, 8>: Into<Mask<T, 8>>,
                 {
                     let values = [true, false, false, true, false, false, true, false];
-                    let mask = core_simd::Mask::<$type, 8>::from_array(values);
+                    let mask = Mask::<$type, 8>::from_array(values);

                     let cast_mask = mask.cast::<T>();
                     assert_eq!(values, cast_mask.to_array());

-                    let into_mask: core_simd::Mask<T, 8> = mask.into();
+                    let into_mask: Mask<T, 8> = mask.into();
                     assert_eq!(values, into_mask.to_array());
                 }

@@ -126,15 +128,15 @@ fn cast_impl<T: core_simd::MaskElement>()
             #[cfg(feature = "generic_const_exprs")]
             #[test]
             fn roundtrip_bitmask_array_conversion() {
-                use core_simd::ToBitMaskArray;
+                use core_simd::simd::ToBitMaskArray;
                 let values = [
                     true, false, false, true, false, false, true, false,
                     true, true, false, false, false, false, false, true,
                 ];
-                let mask = core_simd::Mask::<$type, 16>::from_array(values);
+                let mask = Mask::<$type, 16>::from_array(values);
                 let bitmask = mask.to_bitmask_array();
                 assert_eq!(bitmask, [0b01001001, 0b10000011]);
-                assert_eq!(core_simd::Mask::<$type, 16>::from_bitmask_array(bitmask), mask);
+                assert_eq!(Mask::<$type, 16>::from_bitmask_array(bitmask), mask);
             }
         }
     }
@@ -150,9 +152,10 @@ mod mask_api {

     #[test]
     fn convert() {
+        use core_simd::simd::Mask;
         let values = [true, false, false, true, false, false, true, false];
         assert_eq!(
-            core_simd::Mask::<i8, 8>::from_array(values),
-            core_simd::Mask::<i32, 8>::from_array(values).into()
+            Mask::<i8, 8>::from_array(values),
+            Mask::<i32, 8>::from_array(values).into()
         );
     }
@@ -7,7 +7,7 @@ macro_rules! impl_unary_op_test {
     test_helpers::test_lanes! {
         fn $fn<const LANES: usize>() {
            test_helpers::test_unary_elementwise(
-                &<core_simd::Simd<$scalar, LANES> as core::ops::$trait>::$fn,
+                &<core_simd::simd::Simd<$scalar, LANES> as core::ops::$trait>::$fn,
                &$scalar_fn,
                &|_| true,
            );
@@ -27,7 +27,7 @@ macro_rules! impl_binary_op_test {
     { $scalar:ty, $trait:ident :: $fn:ident, $trait_assign:ident :: $fn_assign:ident, $scalar_fn:expr } => {
         mod $fn {
             use super::*;
-            use core_simd::Simd;
+            use core_simd::simd::Simd;

             test_helpers::test_lanes! {
                 fn normal<const LANES: usize>() {
@@ -64,7 +64,7 @@ macro_rules! impl_binary_checked_op_test {
     { $scalar:ty, $trait:ident :: $fn:ident, $trait_assign:ident :: $fn_assign:ident, $scalar_fn:expr, $check_fn:expr } => {
         mod $fn {
             use super::*;
-            use core_simd::Simd;
+            use core_simd::simd::Simd;

             test_helpers::test_lanes! {
                 fn normal<const LANES: usize>() {
@@ -173,7 +173,7 @@ macro_rules! impl_signed_tests {
     { $scalar:tt } => {
         mod $scalar {
             use core_simd::simd::SimdInt;
-            type Vector<const LANES: usize> = core_simd::Simd<Scalar, LANES>;
+            type Vector<const LANES: usize> = core_simd::simd::Simd<Scalar, LANES>;
             type Scalar = $scalar;

             impl_common_integer_tests! { Vector, Scalar }
@@ -314,7 +314,7 @@ macro_rules! impl_unsigned_tests {
     { $scalar:tt } => {
         mod $scalar {
             use core_simd::simd::SimdUint;
-            type Vector<const LANES: usize> = core_simd::Simd<Scalar, LANES>;
+            type Vector<const LANES: usize> = core_simd::simd::Simd<Scalar, LANES>;
             type Scalar = $scalar;

             impl_common_integer_tests! { Vector, Scalar }
@@ -348,8 +348,8 @@ fn rem_zero_panic<const LANES: usize>() {
 macro_rules! impl_float_tests {
     { $scalar:tt, $int_scalar:tt } => {
         mod $scalar {
-            use core_simd::SimdFloat;
-            type Vector<const LANES: usize> = core_simd::Simd<Scalar, LANES>;
+            use core_simd::simd::SimdFloat;
+            type Vector<const LANES: usize> = core_simd::simd::Simd<Scalar, LANES>;
             type Scalar = $scalar;

             impl_unary_op_test!(Scalar, Neg::neg);
library/portable-simd/crates/core_simd/tests/pointers.rs (new file, 111 lines)
@@ -0,0 +1,111 @@
#![feature(portable_simd, strict_provenance)]

use core_simd::simd::{Simd, SimdConstPtr, SimdMutPtr};

macro_rules! common_tests {
    { $constness:ident } => {
        test_helpers::test_lanes! {
            fn is_null<const LANES: usize>() {
                test_helpers::test_unary_mask_elementwise(
                    &Simd::<*$constness u32, LANES>::is_null,
                    &<*$constness u32>::is_null,
                    &|_| true,
                );
            }

            fn addr<const LANES: usize>() {
                test_helpers::test_unary_elementwise(
                    &Simd::<*$constness u32, LANES>::addr,
                    &<*$constness u32>::addr,
                    &|_| true,
                );
            }

            fn with_addr<const LANES: usize>() {
                test_helpers::test_binary_elementwise(
                    &Simd::<*$constness u32, LANES>::with_addr,
                    &<*$constness u32>::with_addr,
                    &|_, _| true,
                );
            }

            fn expose_addr<const LANES: usize>() {
                test_helpers::test_unary_elementwise(
                    &Simd::<*$constness u32, LANES>::expose_addr,
                    &<*$constness u32>::expose_addr,
                    &|_| true,
                );
            }

            fn wrapping_offset<const LANES: usize>() {
                test_helpers::test_binary_elementwise(
                    &Simd::<*$constness u32, LANES>::wrapping_offset,
                    &<*$constness u32>::wrapping_offset,
                    &|_, _| true,
                );
            }

            fn wrapping_add<const LANES: usize>() {
                test_helpers::test_binary_elementwise(
                    &Simd::<*$constness u32, LANES>::wrapping_add,
                    &<*$constness u32>::wrapping_add,
                    &|_, _| true,
                );
            }

            fn wrapping_sub<const LANES: usize>() {
                test_helpers::test_binary_elementwise(
                    &Simd::<*$constness u32, LANES>::wrapping_sub,
                    &<*$constness u32>::wrapping_sub,
                    &|_, _| true,
                );
            }
        }
    }
}

mod const_ptr {
    use super::*;
    common_tests! { const }

    test_helpers::test_lanes! {
        fn cast_mut<const LANES: usize>() {
            test_helpers::test_unary_elementwise(
                &Simd::<*const u32, LANES>::cast_mut,
                &<*const u32>::cast_mut,
                &|_| true,
            );
        }

        fn from_exposed_addr<const LANES: usize>() {
            test_helpers::test_unary_elementwise(
                &Simd::<*const u32, LANES>::from_exposed_addr,
                &core::ptr::from_exposed_addr::<u32>,
                &|_| true,
            );
        }
    }
}

mod mut_ptr {
    use super::*;
    common_tests! { mut }

    test_helpers::test_lanes! {
        fn cast_const<const LANES: usize>() {
            test_helpers::test_unary_elementwise(
                &Simd::<*mut u32, LANES>::cast_const,
                &<*mut u32>::cast_const,
                &|_| true,
            );
        }

        fn from_exposed_addr<const LANES: usize>() {
            test_helpers::test_unary_elementwise(
                &Simd::<*mut u32, LANES>::from_exposed_addr,
                &core::ptr::from_exposed_addr_mut::<u32>,
                &|_| true,
            );
        }
    }
}
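These tests exercise the newly public pointer vectors lane by lane against the scalar strict-provenance methods. A small usage sketch of the same API (assuming the nightly features enabled at the top of this test file):

```rust
#![feature(portable_simd, strict_provenance)]
use core_simd::simd::{Simd, SimdConstPtr};

// Advance a pointer by a different element count in each lane,
// mirroring `<*const u32>::wrapping_add` per lane.
fn fan_out(p: *const u32) -> [*const u32; 4] {
    let ptrs = Simd::<*const u32, 4>::splat(p);
    let steps = Simd::from_array([0usize, 1, 2, 3]);
    ptrs.wrapping_add(steps).to_array()
}
```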
@@ -5,7 +5,7 @@ macro_rules! float_rounding_test {
     mod $scalar {
         use std_float::StdFloat;

-        type Vector<const LANES: usize> = core_simd::Simd<$scalar, LANES>;
+        type Vector<const LANES: usize> = core_simd::simd::Simd<$scalar, LANES>;
         type Scalar = $scalar;
         type IntScalar = $int_scalar;

@@ -1,5 +1,5 @@
 #![feature(portable_simd)]
-use core_simd::{Simd, Swizzle};
+use core_simd::simd::{Simd, Swizzle};

 #[cfg(target_arch = "wasm32")]
 use wasm_bindgen_test::*;
@@ -60,3 +60,17 @@ fn interleave() {
     assert_eq!(even, a);
     assert_eq!(odd, b);
 }
+
+// portable-simd#298
+#[test]
+#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+fn interleave_one() {
+    let a = Simd::from_array([0]);
+    let b = Simd::from_array([1]);
+    let (lo, hi) = a.interleave(b);
+    assert_eq!(lo.to_array(), [0]);
+    assert_eq!(hi.to_array(), [1]);
+    let (even, odd) = lo.deinterleave(hi);
+    assert_eq!(even, a);
+    assert_eq!(odd, b);
+}
library/portable-simd/crates/core_simd/tests/swizzle_dyn.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
#![feature(portable_simd)]
use core::{fmt, ops::RangeInclusive};
use proptest;
use test_helpers::{self, biteq, make_runner, prop_assert_biteq};

fn swizzle_dyn_scalar_ver<const N: usize>(values: [u8; N], idxs: [u8; N]) -> [u8; N] {
    let mut array = [0; N];
    for (i, k) in idxs.into_iter().enumerate() {
        if (k as usize) < N {
            array[i] = values[k as usize];
        };
    }
    array
}

test_helpers::test_lanes! {
    fn swizzle_dyn<const N: usize>() {
        match_simd_with_fallback(
            &core_simd::simd::Simd::<u8, N>::swizzle_dyn,
            &swizzle_dyn_scalar_ver,
            &|_, _| true,
        );
    }
}

fn match_simd_with_fallback<Scalar, ScalarResult, Vector, VectorResult, const N: usize>(
    fv: &dyn Fn(Vector, Vector) -> VectorResult,
    fs: &dyn Fn([Scalar; N], [Scalar; N]) -> [ScalarResult; N],
    check: &dyn Fn([Scalar; N], [Scalar; N]) -> bool,
) where
    Scalar: Copy + fmt::Debug + SwizzleStrategy,
    ScalarResult: Copy + biteq::BitEq + fmt::Debug + SwizzleStrategy,
    Vector: Into<[Scalar; N]> + From<[Scalar; N]> + Copy,
    VectorResult: Into<[ScalarResult; N]> + From<[ScalarResult; N]> + Copy,
{
    test_swizzles_2(&|x: [Scalar; N], y: [Scalar; N]| {
        proptest::prop_assume!(check(x, y));
        let result_v: [ScalarResult; N] = fv(x.into(), y.into()).into();
        let result_s: [ScalarResult; N] = fs(x, y);
        crate::prop_assert_biteq!(result_v, result_s);
        Ok(())
    });
}

fn test_swizzles_2<A: fmt::Debug + SwizzleStrategy, B: fmt::Debug + SwizzleStrategy>(
    f: &dyn Fn(A, B) -> proptest::test_runner::TestCaseResult,
) {
    let mut runner = make_runner();
    runner
        .run(
            &(A::swizzled_strategy(), B::swizzled_strategy()),
            |(a, b)| f(a, b),
        )
        .unwrap();
}

pub trait SwizzleStrategy {
    type Strategy: proptest::strategy::Strategy<Value = Self>;
    fn swizzled_strategy() -> Self::Strategy;
}

impl SwizzleStrategy for u8 {
    type Strategy = RangeInclusive<u8>;
    fn swizzled_strategy() -> Self::Strategy {
        0..=64
    }
}

impl<T: fmt::Debug + SwizzleStrategy, const N: usize> SwizzleStrategy for [T; N] {
    type Strategy = test_helpers::array::UniformArrayStrategy<T::Strategy, Self>;
    fn swizzled_strategy() -> Self::Strategy {
        Self::Strategy::new(T::swizzled_strategy())
    }
}
@@ -2,7 +2,7 @@
 #![allow(incomplete_features)]
 #![cfg(feature = "generic_const_exprs")]

-use core_simd::Simd;
+use core_simd::simd::Simd;

 #[test]
 fn byte_convert() {
@@ -0,0 +1,25 @@
#![feature(portable_simd)]

#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::*;

#[cfg(target_arch = "wasm32")]
wasm_bindgen_test_configure!(run_in_browser);

use core_simd::simd::i32x4;

#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn try_from_slice() {
    // Equal length
    assert_eq!(
        i32x4::try_from([1, 2, 3, 4].as_slice()).unwrap(),
        i32x4::from_array([1, 2, 3, 4])
    );

    // Slice length > vector length
    assert!(i32x4::try_from([1, 2, 3, 4, 5].as_slice()).is_err());

    // Slice length < vector length
    assert!(i32x4::try_from([1, 2, 3].as_slice()).is_err());
}
@@ -8,3 +8,6 @@ publish = false
 version = "0.10"
 default-features = false
 features = ["alloc"]
+
+[features]
+all_lane_counts = []
@@ -41,6 +41,7 @@ pub struct ArrayValueTree<T> {

     fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
         let tree: [S::Tree; LANES] = unsafe {
+            #[allow(clippy::uninit_assumed_init)]
             let mut tree: [MaybeUninit<S::Tree>; LANES] = MaybeUninit::uninit().assume_init();
             for t in tree.iter_mut() {
                 *t = MaybeUninit::new(self.strategy.new_tree(runner)?)
@@ -60,6 +61,7 @@ fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {

     fn current(&self) -> Self::Value {
         unsafe {
+            #[allow(clippy::uninit_assumed_init)]
             let mut value: [MaybeUninit<T::Value>; LANES] = MaybeUninit::uninit().assume_init();
             for (tree_elem, value_elem) in self.tree.iter().zip(value.iter_mut()) {
                 *value_elem = MaybeUninit::new(tree_elem.current());
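The `MaybeUninit` dance those clippy allowances cover is the pre-stabilization pattern for building an array element by element without a `T: Default` bound. The same pattern in isolation (illustration only):

```rust
use core::mem::{self, MaybeUninit};

fn init_array<T: Clone, const N: usize>(value: T) -> [T; N] {
    // SAFETY: an array of MaybeUninit<T> is allowed to be uninitialized.
    let mut arr: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
    for slot in arr.iter_mut() {
        *slot = MaybeUninit::new(value.clone());
    }
    // SAFETY: every slot was just initialized; the two array types have
    // identical size, which is all transmute_copy requires.
    unsafe { mem::transmute_copy(&arr) }
}
```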
@@ -55,6 +55,26 @@ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {

 impl_float_biteq! { f32, f64 }

+impl<T> BitEq for *const T {
+    fn biteq(&self, other: &Self) -> bool {
+        self == other
+    }
+
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl<T> BitEq for *mut T {
+    fn biteq(&self, other: &Self) -> bool {
+        self == other
+    }
+
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
 impl<T: BitEq, const N: usize> BitEq for [T; N] {
     fn biteq(&self, other: &Self) -> bool {
         self.iter()
@@ -38,6 +38,28 @@ fn default_strategy() -> Self::Strategy {
 impl_num! { f32 }
 impl_num! { f64 }

+impl<T> DefaultStrategy for *const T {
+    type Strategy = proptest::strategy::Map<proptest::num::isize::Any, fn(isize) -> *const T>;
+    fn default_strategy() -> Self::Strategy {
+        fn map<T>(x: isize) -> *const T {
+            x as _
+        }
+        use proptest::strategy::Strategy;
+        proptest::num::isize::ANY.prop_map(map)
+    }
+}
+
+impl<T> DefaultStrategy for *mut T {
+    type Strategy = proptest::strategy::Map<proptest::num::isize::Any, fn(isize) -> *mut T>;
+    fn default_strategy() -> Self::Strategy {
+        fn map<T>(x: isize) -> *mut T {
+            x as _
+        }
+        use proptest::strategy::Strategy;
+        proptest::num::isize::ANY.prop_map(map)
+    }
+}
+
 #[cfg(not(target_arch = "wasm32"))]
 impl DefaultStrategy for u128 {
     type Strategy = proptest::num::u128::Any;
@@ -135,21 +157,21 @@ pub fn test_unary_elementwise<Scalar, ScalarResult, Vector, VectorResult, const
     fs: &dyn Fn(Scalar) -> ScalarResult,
     check: &dyn Fn([Scalar; LANES]) -> bool,
 ) where
-    Scalar: Copy + Default + core::fmt::Debug + DefaultStrategy,
-    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
+    Scalar: Copy + core::fmt::Debug + DefaultStrategy,
+    ScalarResult: Copy + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
     Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
     VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
 {
     test_1(&|x: [Scalar; LANES]| {
         proptest::prop_assume!(check(x));
         let result_1: [ScalarResult; LANES] = fv(x.into()).into();
-        let result_2: [ScalarResult; LANES] = {
-            let mut result = [ScalarResult::default(); LANES];
-            for (i, o) in x.iter().zip(result.iter_mut()) {
-                *o = fs(*i);
-            }
-            result
-        };
+        let result_2: [ScalarResult; LANES] = x
+            .iter()
+            .copied()
+            .map(fs)
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
         crate::prop_assert_biteq!(result_1, result_2);
         Ok(())
     });
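Dropping the `Default` bounds matters because the new pointer element types have no default value; the replacement builds the expected array through an iterator instead. The same pattern in isolation (a sketch, with a hypothetical `map_array` helper):

```rust
fn map_array<T: Copy, U: std::fmt::Debug, const N: usize>(
    xs: [T; N],
    f: impl Fn(T) -> U,
) -> [U; N] {
    // Vec<U> -> [U; N] via TryInto; unwrap is fine since the lengths match.
    xs.iter().copied().map(f).collect::<Vec<_>>().try_into().unwrap()
}
```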
@@ -162,7 +184,7 @@ pub fn test_unary_mask_elementwise<Scalar, Vector, Mask, const LANES: usize>(
     fs: &dyn Fn(Scalar) -> bool,
     check: &dyn Fn([Scalar; LANES]) -> bool,
 ) where
-    Scalar: Copy + Default + core::fmt::Debug + DefaultStrategy,
+    Scalar: Copy + core::fmt::Debug + DefaultStrategy,
     Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
     Mask: Into<[bool; LANES]> + From<[bool; LANES]> + Copy,
 {
@@ -196,9 +218,9 @@ pub fn test_binary_elementwise<
     fs: &dyn Fn(Scalar1, Scalar2) -> ScalarResult,
     check: &dyn Fn([Scalar1; LANES], [Scalar2; LANES]) -> bool,
 ) where
-    Scalar1: Copy + Default + core::fmt::Debug + DefaultStrategy,
-    Scalar2: Copy + Default + core::fmt::Debug + DefaultStrategy,
-    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
+    Scalar1: Copy + core::fmt::Debug + DefaultStrategy,
+    Scalar2: Copy + core::fmt::Debug + DefaultStrategy,
+    ScalarResult: Copy + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
     Vector1: Into<[Scalar1; LANES]> + From<[Scalar1; LANES]> + Copy,
     Vector2: Into<[Scalar2; LANES]> + From<[Scalar2; LANES]> + Copy,
     VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
@@ -206,13 +228,14 @@ pub fn test_binary_elementwise<
     test_2(&|x: [Scalar1; LANES], y: [Scalar2; LANES]| {
         proptest::prop_assume!(check(x, y));
         let result_1: [ScalarResult; LANES] = fv(x.into(), y.into()).into();
-        let result_2: [ScalarResult; LANES] = {
-            let mut result = [ScalarResult::default(); LANES];
-            for ((i1, i2), o) in x.iter().zip(y.iter()).zip(result.iter_mut()) {
-                *o = fs(*i1, *i2);
-            }
-            result
-        };
+        let result_2: [ScalarResult; LANES] = x
+            .iter()
+            .copied()
+            .zip(y.iter().copied())
+            .map(|(x, y)| fs(x, y))
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
         crate::prop_assert_biteq!(result_1, result_2);
         Ok(())
     });
@@ -333,6 +356,39 @@ pub fn test_ternary_elementwise<
     );
 }

+#[doc(hidden)]
+#[macro_export]
+macro_rules! test_lanes_helper {
+    ($($(#[$meta:meta])* $fn_name:ident $lanes:literal;)+) => {
+        $(
+            #[test]
+            $(#[$meta])*
+            fn $fn_name() {
+                implementation::<$lanes>();
+            }
+        )+
+    };
+    (
+        $(#[$meta:meta])+;
+        $($(#[$meta_before:meta])+ $fn_name_before:ident $lanes_before:literal;)*
+        $fn_name:ident $lanes:literal;
+        $($fn_name_rest:ident $lanes_rest:literal;)*
+    ) => {
+        $crate::test_lanes_helper!(
+            $(#[$meta])+;
+            $($(#[$meta_before])+ $fn_name_before $lanes_before;)*
+            $(#[$meta])+ $fn_name $lanes;
+            $($fn_name_rest $lanes_rest;)*
+        );
+    };
+    (
+        $(#[$meta_ignored:meta])+;
+        $($(#[$meta:meta])+ $fn_name:ident $lanes:literal;)+
+    ) => {
+        $crate::test_lanes_helper!($($(#[$meta])+ $fn_name $lanes;)+);
+    };
+}
+
 /// Expand a const-generic test into separate tests for each possible lane count.
 #[macro_export]
 macro_rules! test_lanes {
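`test_lanes_helper!` distributes a shared attribute list over many `name lanes;` pairs and then emits one `#[test]` per pair. Assuming a module that defines the const-generic `implementation` the macro expects, a call like this:

```rust
fn implementation<const LANES: usize>() {
    assert!(LANES > 0);
}

test_helpers::test_lanes_helper!(
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)];
    lanes_1 1;
    lanes_2 2;
);
```

expands to the same thing as writing `#[test] fn lanes_1() { implementation::<1>(); }` and `#[test] fn lanes_2() { implementation::<2>(); }` by hand, each carrying the shared attribute.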
@@ -345,57 +401,96 @@ mod $test {

                 fn implementation<const $lanes: usize>()
                 where
-                    core_simd::LaneCount<$lanes>: core_simd::SupportedLaneCount,
+                    core_simd::simd::LaneCount<$lanes>: core_simd::simd::SupportedLaneCount,
                 $body

                 #[cfg(target_arch = "wasm32")]
                 wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);

-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                fn lanes_1() {
-                    implementation::<1>();
-                }
-
-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                fn lanes_2() {
-                    implementation::<2>();
-                }
-
-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                fn lanes_4() {
-                    implementation::<4>();
-                }
-
-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
-                fn lanes_8() {
-                    implementation::<8>();
-                }
-
-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
-                fn lanes_16() {
-                    implementation::<16>();
-                }
-
-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
-                fn lanes_32() {
-                    implementation::<32>();
-                }
-
-                #[test]
-                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
-                fn lanes_64() {
-                    implementation::<64>();
-                }
+                $crate::test_lanes_helper!(
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)];
+                    lanes_1 1;
+                    lanes_2 2;
+                    lanes_4 4;
+                );
+
+                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
+                $crate::test_lanes_helper!(
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)];
+                    lanes_8 8;
+                    lanes_16 16;
+                    lanes_32 32;
+                    lanes_64 64;
+                );
+
+                #[cfg(feature = "all_lane_counts")]
+                $crate::test_lanes_helper!(
+                    // test some odd and even non-power-of-2 lengths on miri
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)];
+                    lanes_3 3;
+                    lanes_5 5;
+                    lanes_6 6;
+                );
+
+                #[cfg(feature = "all_lane_counts")]
+                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
+                $crate::test_lanes_helper!(
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)];
+                    lanes_7 7;
+                    lanes_9 9;
+                    lanes_10 10;
+                    lanes_11 11;
+                    lanes_12 12;
+                    lanes_13 13;
+                    lanes_14 14;
+                    lanes_15 15;
+                    lanes_17 17;
+                    lanes_18 18;
+                    lanes_19 19;
+                    lanes_20 20;
+                    lanes_21 21;
+                    lanes_22 22;
+                    lanes_23 23;
+                    lanes_24 24;
+                    lanes_25 25;
+                    lanes_26 26;
+                    lanes_27 27;
+                    lanes_28 28;
+                    lanes_29 29;
+                    lanes_30 30;
+                    lanes_31 31;
+                    lanes_33 33;
+                    lanes_34 34;
+                    lanes_35 35;
+                    lanes_36 36;
+                    lanes_37 37;
+                    lanes_38 38;
+                    lanes_39 39;
+                    lanes_40 40;
+                    lanes_41 41;
+                    lanes_42 42;
+                    lanes_43 43;
+                    lanes_44 44;
+                    lanes_45 45;
+                    lanes_46 46;
+                    lanes_47 47;
+                    lanes_48 48;
+                    lanes_49 49;
+                    lanes_50 50;
+                    lanes_51 51;
+                    lanes_52 52;
+                    lanes_53 53;
+                    lanes_54 54;
+                    lanes_55 55;
+                    lanes_56 56;
+                    lanes_57 57;
+                    lanes_58 58;
+                    lanes_59 59;
+                    lanes_60 60;
+                    lanes_61 61;
+                    lanes_62 62;
+                    lanes_63 63;
+                );
             }
         )*
     }
@@ -413,50 +508,93 @@ mod $test {

                 fn implementation<const $lanes: usize>()
                 where
-                    core_simd::LaneCount<$lanes>: core_simd::SupportedLaneCount,
+                    core_simd::simd::LaneCount<$lanes>: core_simd::simd::SupportedLaneCount,
                 $body

-                #[test]
-                #[should_panic]
-                fn lanes_1() {
-                    implementation::<1>();
-                }
-
-                #[test]
-                #[should_panic]
-                fn lanes_2() {
-                    implementation::<2>();
-                }
-
-                #[test]
-                #[should_panic]
-                fn lanes_4() {
-                    implementation::<4>();
-                }
-
-                #[test]
-                #[should_panic]
-                fn lanes_8() {
-                    implementation::<8>();
-                }
-
-                #[test]
-                #[should_panic]
-                fn lanes_16() {
-                    implementation::<16>();
-                }
-
-                #[test]
-                #[should_panic]
-                fn lanes_32() {
-                    implementation::<32>();
-                }
-
-                #[test]
-                #[should_panic]
-                fn lanes_64() {
-                    implementation::<64>();
-                }
+                $crate::test_lanes_helper!(
+                    #[should_panic];
+                    lanes_1 1;
+                    lanes_2 2;
+                    lanes_4 4;
+                );
+
+                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
+                $crate::test_lanes_helper!(
+                    #[should_panic];
+                    lanes_8 8;
+                    lanes_16 16;
+                    lanes_32 32;
+                    lanes_64 64;
+                );
+
+                #[cfg(feature = "all_lane_counts")]
+                $crate::test_lanes_helper!(
+                    // test some odd and even non-power-of-2 lengths on miri
+                    #[should_panic];
+                    lanes_3 3;
+                    lanes_5 5;
+                    lanes_6 6;
+                );
+
+                #[cfg(feature = "all_lane_counts")]
+                #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
+                $crate::test_lanes_helper!(
+                    #[should_panic];
+                    lanes_7 7;
+                    lanes_9 9;
+                    lanes_10 10;
+                    lanes_11 11;
+                    lanes_12 12;
+                    lanes_13 13;
+                    lanes_14 14;
+                    lanes_15 15;
+                    lanes_17 17;
+                    lanes_18 18;
+                    lanes_19 19;
+                    lanes_20 20;
+                    lanes_21 21;
+                    lanes_22 22;
+                    lanes_23 23;
+                    lanes_24 24;
+                    lanes_25 25;
+                    lanes_26 26;
+                    lanes_27 27;
+                    lanes_28 28;
+                    lanes_29 29;
+                    lanes_30 30;
+                    lanes_31 31;
+                    lanes_33 33;
+                    lanes_34 34;
+                    lanes_35 35;
+                    lanes_36 36;
+                    lanes_37 37;
+                    lanes_38 38;
+                    lanes_39 39;
+                    lanes_40 40;
+                    lanes_41 41;
+                    lanes_42 42;
+                    lanes_43 43;
+                    lanes_44 44;
+                    lanes_45 45;
+                    lanes_46 46;
+                    lanes_47 47;
+                    lanes_48 48;
+                    lanes_49 49;
+                    lanes_50 50;
+                    lanes_51 51;
+                    lanes_52 52;
+                    lanes_53 53;
+                    lanes_54 54;
+                    lanes_55 55;
+                    lanes_56 56;
+                    lanes_57 57;
+                    lanes_58 58;
+                    lanes_59 59;
+                    lanes_60 60;
+                    lanes_61 61;
+                    lanes_62 62;
+                    lanes_63 63;
+                );
             }
         )*
     }