Fix casts, add tests

This commit is contained in:
Caleb Zulawski 2020-10-11 14:32:46 -04:00
parent 75fdacde1c
commit 3d8721b053
6 changed files with 150 additions and 26 deletions

View File

@ -36,4 +36,7 @@
/// xor
pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
/// fptoui/fptosi/uitofp/sitofp
pub(crate) fn simd_cast<T, U>(x: T) -> U;
}

View File

@ -6,7 +6,6 @@ impl $type:ident {
ceil = $ceil_intrinsic:literal,
round = $round_intrinsic:literal,
trunc = $trunc_intrinsic:literal,
round_to_int = $round_to_int_intrinsic:literal,
}
} => {
mod $type {
@ -20,8 +19,6 @@ mod $type {
fn round_intrinsic(x: crate::$type) -> crate::$type;
#[link_name = $trunc_intrinsic]
fn trunc_intrinsic(x: crate::$type) -> crate::$type;
#[link_name = $round_to_int_intrinsic]
fn round_to_int_intrinsic(x: crate::$type) -> crate::$int_type;
}
impl crate::$type {
@ -60,11 +57,24 @@ pub fn fract(self) -> Self {
self - self.trunc()
}
/// Returns the nearest integer to each lane. Round half-way cases away from 0.0.
#[must_use = "method returns a new vector and does not mutate the original value"]
/// Rounds toward zero and converts to the same-width integer type, assuming that
/// the value is finite and fits in that type.
///
/// # Safety
/// The value must:
///
/// * Not be NaN
/// * Not be infinite
/// * Be representable in the return type, after truncating off its fractional part
#[inline]
pub fn round_to_int(self) -> crate::$int_type {
unsafe { round_to_int_intrinsic(self) }
pub unsafe fn to_int_unchecked(self) -> crate::$int_type {
crate::intrinsics::simd_cast(self)
}
/// Converts each lane of the same-width integer vector to a floating-point value.
///
/// Integer values that are not exactly representable in the floating-point
/// type are rounded to the nearest representable value. This conversion is
/// defined for every input, so no safety contract is needed.
#[inline]
pub fn round_from_int(value: crate::$int_type) -> Self {
    // SAFETY: integer-to-float `simd_cast` is defined for all input values.
    unsafe { crate::intrinsics::simd_cast(value) }
}
}
}
@ -78,7 +88,6 @@ impl f32x2 {
ceil = "llvm.ceil.v2f32",
round = "llvm.round.v2f32",
trunc = "llvm.trunc.v2f32",
round_to_int = "llvm.lround.i32.v2f32",
}
}
@ -89,7 +98,6 @@ impl f32x4 {
ceil = "llvm.ceil.v4f32",
round = "llvm.round.v4f32",
trunc = "llvm.trunc.v4f32",
round_to_int = "llvm.lround.i32.v4f32",
}
}
@ -100,7 +108,6 @@ impl f32x8 {
ceil = "llvm.ceil.v8f32",
round = "llvm.round.v8f32",
trunc = "llvm.trunc.v8f32",
round_to_int = "llvm.lround.i32.v8f32",
}
}
@ -111,7 +118,6 @@ impl f32x16 {
ceil = "llvm.ceil.v16f32",
round = "llvm.round.v16f32",
trunc = "llvm.trunc.v16f32",
round_to_int = "llvm.lround.i32.v16f32",
}
}
@ -122,7 +128,6 @@ impl f64x2 {
ceil = "llvm.ceil.v2f64",
round = "llvm.round.v2f64",
trunc = "llvm.trunc.v2f64",
round_to_int = "llvm.lround.i64.v2f64",
}
}
@ -133,7 +138,6 @@ impl f64x4 {
ceil = "llvm.ceil.v4f64",
round = "llvm.round.v4f64",
trunc = "llvm.trunc.v4f64",
round_to_int = "llvm.lround.i64.v4f64",
}
}
@ -144,6 +148,5 @@ impl f64x8 {
ceil = "llvm.ceil.v8f64",
round = "llvm.round.v8f64",
trunc = "llvm.trunc.v8f64",
round_to_int = "llvm.lround.i64.v8f64",
}
}

View File

@ -1,8 +1,13 @@
pub fn apply_unary_lanewise<T: Copy, V: AsMut<[T]> + Default>(mut x: V, f: impl Fn(T) -> T) -> V {
for lane in x.as_mut() {
*lane = f(*lane)
pub fn apply_unary_lanewise<T1: Copy, T2: Copy, V1: AsRef<[T1]>, V2: AsMut<[T2]> + Default>(
x: V1,
f: impl Fn(T1) -> T2,
) -> V2 {
let mut y = V2::default();
assert_eq!(x.as_ref().len(), y.as_mut().len());
for (x, y) in x.as_ref().iter().zip(y.as_mut().iter_mut()) {
*y = f(*x);
}
x
y
}
pub fn apply_binary_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(

View File

@ -1,6 +1,6 @@
use super::helpers;
float_tests! { f32x2, f32 }
float_tests! { f32x4, f32 }
float_tests! { f32x8, f32 }
float_tests! { f32x16, f32 }
float_tests! { f32x2, f32, i32x2, i32 }
float_tests! { f32x4, f32, i32x4, i32 }
float_tests! { f32x8, f32, i32x8, i32 }
float_tests! { f32x16, f32, i32x16, i32 }

View File

@ -1,5 +1,5 @@
use super::helpers;
float_tests! { f64x2, f64 }
float_tests! { f64x4, f64 }
float_tests! { f64x8, f64 }
float_tests! { f64x2, f64, i64x2, i64 }
float_tests! { f64x4, f64, i64x4, i64 }
float_tests! { f64x8, f64, i64x8, i64 }

View File

@ -1,5 +1,5 @@
macro_rules! float_tests {
{ $vector:ident, $scalar:ident } => {
{ $vector:ident, $scalar:ident, $int_vector:ident, $int_scalar:ident } => {
#[cfg(test)]
mod $vector {
use super::*;
@ -24,6 +24,18 @@ fn slice_chunks(slice: &[$scalar]) -> impl Iterator<Item = core_simd::$vector> +
slice.chunks_exact(lanes).map(from_slice)
}
/// Builds an integer vector by copying the leading lanes of `slice`.
///
/// Panics (via `copy_from_slice`'s slicing) if `slice` is shorter than the
/// vector's lane count.
fn from_slice_int(slice: &[$int_scalar]) -> core_simd::$int_vector {
    let mut vector = core_simd::$int_vector::default();
    let lanes: &mut [_] = vector.as_mut();
    let n = lanes.len();
    lanes.copy_from_slice(&slice[..n]);
    vector
}
/// Yields `slice` as a sequence of full-width integer vectors, one vector per
/// group of `lane count` consecutive elements; any trailing partial group is
/// dropped (via `chunks_exact`).
fn slice_chunks_int(slice: &[$int_scalar]) -> impl Iterator<Item = core_simd::$int_vector> + '_ {
    let lane_count =
        core::mem::size_of::<core_simd::$int_vector>() / core::mem::size_of::<$int_scalar>();
    slice
        .chunks_exact(lane_count)
        .map(|chunk| from_slice_int(chunk))
}
const A: [$scalar; 16] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.];
const B: [$scalar; 16] = [16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31.];
const C: [$scalar; 16] = [
@ -322,6 +334,107 @@ fn abs_odd_floats() {
assert_biteq!(v.abs(), expected);
}
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn ceil_odd_floats() {
    // Vector `ceil` must agree bit-for-bit with scalar `ceil` on every lane
    // of the non-integral test values in `C`.
    for chunk in slice_chunks(&C) {
        assert_biteq!(chunk.ceil(), apply_unary_lanewise(chunk, <$scalar>::ceil));
    }
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn floor_odd_floats() {
    // Vector `floor` must agree bit-for-bit with scalar `floor` on every lane
    // of the non-integral test values in `C`.
    for chunk in slice_chunks(&C) {
        assert_biteq!(chunk.floor(), apply_unary_lanewise(chunk, <$scalar>::floor));
    }
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn round_odd_floats() {
    // Vector `round` must agree bit-for-bit with scalar `round` on every lane
    // of the non-integral test values in `C`.
    for chunk in slice_chunks(&C) {
        assert_biteq!(chunk.round(), apply_unary_lanewise(chunk, <$scalar>::round));
    }
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn trunc_odd_floats() {
    // Vector `trunc` must agree bit-for-bit with scalar `trunc` on every lane
    // of the non-integral test values in `C`.
    for chunk in slice_chunks(&C) {
        assert_biteq!(chunk.trunc(), apply_unary_lanewise(chunk, <$scalar>::trunc));
    }
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn fract_odd_floats() {
    // Vector `fract` must agree bit-for-bit with scalar `fract` on every lane
    // of the non-integral test values in `C`.
    for chunk in slice_chunks(&C) {
        assert_biteq!(chunk.fract(), apply_unary_lanewise(chunk, <$scalar>::fract));
    }
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn to_int_unchecked() {
    // Every input must be finite and, after truncating its fractional part,
    // representable in the same-width integer type -- anything else violates
    // `to_int_unchecked`'s safety contract and is undefined behavior.  In
    // particular `<$scalar>::MIN` / `-<$scalar>::MIN` (the extreme finite
    // floats) overflow the integer type and must NOT appear here.
    const VALUES: [$scalar; 16] = [
        -0.0,
        0.0,
        -1.0,
        1.0,
        32767.0,
        -32768.0,
        <$scalar>::MIN_POSITIVE,
        -<$scalar>::MIN_POSITIVE,
        <$scalar>::EPSILON,
        -<$scalar>::EPSILON,
        core::$scalar::consts::PI,
        -core::$scalar::consts::PI,
        core::$scalar::consts::TAU,
        -core::$scalar::consts::TAU,
        100.0 / 3.0,
        -100.0 / 3.0,
    ];
    for v in slice_chunks(&VALUES) {
        // SAFETY: all values above are finite and in range for the integer type.
        let expected = apply_unary_lanewise(v, |x| unsafe { x.to_int_unchecked() });
        assert_biteq!(unsafe { v.to_int_unchecked() }, expected);
    }
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn round_from_int() {
    // Integer inputs: zero (listed twice), +/- small through large magnitudes,
    // and the extremes of the integer type.  NOTE(review): MAX/MIN are not
    // exactly representable in the same-width float, presumably to exercise
    // rounding in the conversion -- confirm that was the intent.
    const VALUES: [$int_scalar; 16] = [
        0,
        0,
        1,
        -1,
        100,
        -100,
        200,
        -200,
        413,
        -413,
        1017,
        -1017,
        1234567,
        -1234567,
        <$int_scalar>::MAX,
        <$int_scalar>::MIN,
    ];
    // The lanewise int->float conversion must match the scalar `as` cast,
    // lane for lane, bit for bit.
    for v in slice_chunks_int(&VALUES) {
        let expected = apply_unary_lanewise(v, |x| x as $scalar);
        assert_biteq!(core_simd::$vector::round_from_int(v), expected);
    }
}
}
}
}