Merge pull request #72 from rust-lang/feature/proptest

proptest
This replaces most tests with proptest, and makes it easier to define tests generically over lane count. This should provide much broader API coverage and give us more confidence in our implementation.
This commit is contained in:
Jubilee 2021-02-15 23:27:42 -08:00 committed by GitHub
commit f85bd249c0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
55 changed files with 1042 additions and 1639 deletions

View file

@ -1,3 +1,7 @@
branches:
only:
- master
language: rust
rust:
- nightly

View file

@ -2,4 +2,5 @@
members = [
"crates/core_simd",
"crates/test_helpers",
]

View file

@ -14,3 +14,11 @@ version = "0.2"
[dev-dependencies.wasm-bindgen-test]
version = "0.3"
[dev-dependencies.proptest]
version = "0.10"
default-features = false
features = ["alloc"]
[dev-dependencies.test_helpers]
path = "../test_helpers"

View file

@ -141,6 +141,12 @@ macro_rules! impl_vector {
}
}
impl <const LANES: usize> From<$name<LANES>> for [$type; LANES] where $name<LANES>: crate::LanesAtMost64 {
fn from(vector: $name<LANES>) -> Self {
vector.to_array()
}
}
// splat
impl<const LANES: usize> From<$type> for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]

View file

@ -0,0 +1,3 @@
// Instantiates the shared generic float test suite for `SimdF32`.
// The third argument is the same-width signed integer type used for
// int conversions inside the macro.
#[macro_use]
mod ops_macros;
impl_float_tests! { SimdF32, f32, i32 }

View file

@ -0,0 +1,3 @@
// Instantiates the shared generic float test suite for `SimdF64`.
// The third argument is the same-width signed integer type used for
// int conversions inside the macro.
#[macro_use]
mod ops_macros;
impl_float_tests! { SimdF64, f64, i64 }

View file

@ -1,120 +0,0 @@
/// Exact bit-pattern equality for test assertions.
///
/// Unlike `PartialEq` on floats, a NaN compares equal to an identical NaN
/// bit pattern, and `+0.0`/`-0.0` compare unequal (see the float impl,
/// which compares `to_bits()`). `fmt` mirrors `Debug` but also shows the
/// raw bits so assertion failures are diagnosable.
pub(crate) trait BitEq {
fn biteq(&self, other: &Self) -> bool;
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result;
}
// Implements `BitEq` for families of types. Three arms:
// - `integer`: bitwise equality is ordinary `==`; debug shows hex.
// - `float`: compares raw IEEE bit patterns via `to_bits()`, so NaNs with
//   identical payloads are equal and `+0.0 != -0.0`.
// - `vector`: lanewise `biteq` over the `AsRef<[_]>` slices; unequal
//   lengths compare unequal.
macro_rules! impl_biteq {
{ integer impl BitEq for $($type:ty,)* } => {
$(
impl BitEq for $type {
fn biteq(&self, other: &Self) -> bool {
self == other
}
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "{:?} ({:x})", self, self)
}
}
)*
};
{ float impl BitEq for $($type:ty,)* } => {
$(
impl BitEq for $type {
fn biteq(&self, other: &Self) -> bool {
self.to_bits() == other.to_bits()
}
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "{:?} ({:x})", self, self.to_bits())
}
}
)*
};
{ vector impl BitEq for $($type:ty,)* } => {
$(
impl BitEq for $type {
fn biteq(&self, other: &Self) -> bool {
let a: &[_] = self.as_ref();
let b: &[_] = other.as_ref();
if a.len() == b.len() {
a.iter().zip(b.iter()).fold(true, |value, (left, right)| {
value && left.biteq(right)
})
} else {
false
}
}
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
// Adapter so `debug_list` prints each lane through `BitEq::fmt`
// (bit-pattern form) instead of plain `Debug`.
#[repr(transparent)]
struct Wrapper<'a, T: BitEq>(&'a T);
impl<T: BitEq> core::fmt::Debug for Wrapper<'_, T> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
self.0.fmt(f)
}
}
let slice: &[_] = self.as_ref();
f.debug_list()
.entries(slice.iter().map(|x| Wrapper(x)))
.finish()
}
}
)*
};
}
// Scalar integer types: bitwise equality is plain `==`.
impl_biteq! {
integer impl BitEq for
u8, u16, u32, u64, u128, usize,
i8, i16, i32, i64, i128, isize,
}
// Scalar floats: compared via `to_bits()`.
impl_biteq! {
float impl BitEq for f32, f64,
}
// Every supported SIMD vector type: compared lanewise.
impl_biteq! {
vector impl BitEq for
core_simd::u8x8, core_simd::u8x16, core_simd::u8x32, core_simd::u8x64,
core_simd::i8x8, core_simd::i8x16, core_simd::i8x32, core_simd::i8x64,
core_simd::u16x4, core_simd::u16x8, core_simd::u16x16, core_simd::u16x32,
core_simd::i16x4, core_simd::i16x8, core_simd::i16x16, core_simd::i16x32,
core_simd::u32x2, core_simd::u32x4, core_simd::u32x8, core_simd::u32x16,
core_simd::i32x2, core_simd::i32x4, core_simd::i32x8, core_simd::i32x16,
core_simd::u64x2, core_simd::u64x4, core_simd::u64x8,
core_simd::i64x2, core_simd::i64x4, core_simd::i64x8,
core_simd::u128x2, core_simd::u128x4,
core_simd::i128x2, core_simd::i128x4,
core_simd::usizex2, core_simd::usizex4, core_simd::usizex8,
core_simd::isizex2, core_simd::isizex4, core_simd::isizex8,
core_simd::f32x2, core_simd::f32x4, core_simd::f32x8, core_simd::f32x16,
core_simd::f64x2, core_simd::f64x4, core_simd::f64x8,
}
/// Newtype adapter that routes `PartialEq` and `Debug` through `BitEq`,
/// letting `assert_eq!` perform bit-pattern comparisons and print bit
/// patterns on failure.
pub(crate) struct BitEqWrapper<'a, T>(pub(crate) &'a T);
impl<T: BitEq> PartialEq for BitEqWrapper<'_, T> {
fn eq(&self, other: &Self) -> bool {
self.0.biteq(other.0)
}
}
impl<T: BitEq> core::fmt::Debug for BitEqWrapper<'_, T> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
self.0.fmt(f)
}
}
// Asserts two values are bitwise-equal. Evaluates each argument exactly
// once, then delegates to `assert_eq!` via `BitEqWrapper` so failures
// display raw bit patterns.
macro_rules! assert_biteq {
{ $a:expr, $b:expr } => {
{
use helpers::biteq::BitEqWrapper;
let a = $a;
let b = $b;
assert_eq!(BitEqWrapper(&a), BitEqWrapper(&b));
}
}
}

View file

@ -1,61 +0,0 @@
//! These helpers provide a way to easily emulate a vectorized SIMD op on two SIMD vectors,
//! except using scalar ops that iterate through each lane, one at a time, so as to remove
//! the vagaries of compilation.
//!
//! Do note, however, that when testing that vectorized operations #[should_panic], these
//! "scalarized SIMD ops" will trigger scalar code paths that may also normally panic.
/// Maps `f` over every lane of `x`, collecting the results into a fresh
/// `V2` (built via `Default`, which must have the same lane count).
///
/// Panics if the lane counts of `x` and `V2::default()` differ.
pub fn apply_unary_lanewise<T1: Copy, T2: Copy, V1: AsRef<[T1]>, V2: AsMut<[T2]> + Default>(
    x: V1,
    f: impl Fn(T1) -> T2,
) -> V2 {
    let mut result = V2::default();
    assert_eq!(x.as_ref().len(), result.as_mut().len());
    for (dst, src) in result.as_mut().iter_mut().zip(x.as_ref().iter()) {
        *dst = f(*src);
    }
    result
}
/// Applies `f` to each pair of corresponding lanes of `a` and `b`,
/// returning the results as a new `V` built via `Default`.
///
/// Panics if `a`, `b`, and `V::default()` do not all have the same lane
/// count. (Previously no check was made, so a mismatched `Default` —
/// e.g. an empty `Vec` — silently produced a truncated result; this now
/// mirrors the explicit check in `apply_unary_lanewise`.)
pub fn apply_binary_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
    a: V,
    b: V,
    f: impl Fn(T, T) -> T,
) -> V {
    let mut out = V::default();
    let out_slice = out.as_mut();
    let a_slice = a.as_ref();
    let b_slice = b.as_ref();
    // Fail loudly on lane-count mismatch instead of truncating via `zip`.
    assert_eq!(a_slice.len(), b_slice.len());
    assert_eq!(a_slice.len(), out_slice.len());
    for (o, (a, b)) in out_slice.iter_mut().zip(a_slice.iter().zip(b_slice.iter())) {
        *o = f(*a, *b);
    }
    out
}
/// Applies `f(lane, b)` to every lane of `a` with the scalar `b` as the
/// right-hand operand, collecting the results into a new `V`.
pub fn apply_binary_scalar_rhs_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
    a: V,
    b: T,
    f: impl Fn(T, T) -> T,
) -> V {
    let mut result = V::default();
    for (dst, &lane) in result.as_mut().iter_mut().zip(a.as_ref().iter()) {
        *dst = f(lane, b);
    }
    result
}
/// Applies `f(a, lane)` to every lane of `b` with the scalar `a` as the
/// left-hand operand, collecting the results into a new `V`.
pub fn apply_binary_scalar_lhs_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
    a: T,
    b: V,
    f: impl Fn(T, T) -> T,
) -> V {
    let mut result = V::default();
    for (dst, &lane) in result.as_mut().iter_mut().zip(b.as_ref().iter()) {
        *dst = f(a, lane);
    }
    result
}

View file

@ -1,4 +0,0 @@
// Test helper modules: `biteq` provides bit-pattern equality assertions
// (macro-exporting, hence `#[macro_use]`); `lanewise` provides scalar
// reference implementations of lanewise SIMD ops.
#[macro_use]
pub mod biteq;
pub mod lanewise;

View file

@ -0,0 +1,3 @@
// Instantiates the shared signed-integer test suite for `SimdI128`.
#[macro_use]
mod ops_macros;
impl_signed_tests! { SimdI128, i128 }

View file

@ -0,0 +1,3 @@
// Instantiates the shared signed-integer test suite for `SimdI16`.
#[macro_use]
mod ops_macros;
impl_signed_tests! { SimdI16, i16 }

View file

@ -0,0 +1,3 @@
// Instantiates the shared signed-integer test suite for `SimdI32`.
#[macro_use]
mod ops_macros;
impl_signed_tests! { SimdI32, i32 }

View file

@ -0,0 +1,3 @@
// Instantiates the shared signed-integer test suite for `SimdI64`.
#[macro_use]
mod ops_macros;
impl_signed_tests! { SimdI64, i64 }

View file

@ -0,0 +1,3 @@
// Instantiates the shared signed-integer test suite for `SimdI8`.
#[macro_use]
mod ops_macros;
impl_signed_tests! { SimdI8, i8 }

View file

@ -0,0 +1,3 @@
// Instantiates the shared signed-integer test suite for `SimdIsize`.
#[macro_use]
mod ops_macros;
impl_signed_tests! { SimdIsize, isize }

View file

@ -0,0 +1 @@
// Entry point for the mask-type test modules.
mod mask_ops_impl;

View file

@ -0,0 +1,9 @@
// One test module per mask width, each driven by the shared
// macro-exporting `mask_macros` module.
#[macro_use]
mod mask_macros;
mod mask8;
mod mask16;
mod mask32;
mod mask64;
mod mask128;
mod masksize;

View file

@ -1 +0,0 @@
// Entry point for the (pre-proptest) per-type operator test modules.
mod ops_impl;

View file

@ -1,6 +0,0 @@
// Instantiates `float_tests!` for each supported f32 vector width.
use super::helpers;
float_tests! { f32x2, f32, i32x2, i32 }
float_tests! { f32x4, f32, i32x4, i32 }
float_tests! { f32x8, f32, i32x8, i32 }
float_tests! { f32x16, f32, i32x16, i32 }

View file

@ -1,5 +0,0 @@
// Instantiates `float_tests!` for each supported f64 vector width.
use super::helpers;
float_tests! { f64x2, f64, i64x2, i64 }
float_tests! { f64x4, f64, i64x4, i64 }
float_tests! { f64x8, f64, i64x8, i64 }

View file

@ -1,418 +0,0 @@
// Generates a `#[cfg(test)]` module of lanewise tests for one float
// vector type. `$vector`/`$scalar` are the SIMD vector and its element
// type; `$int_vector`/`$int_scalar` are the same-width signed integer
// types used by the `to_int_unchecked`/`round_from_int` conversions.
// Each test compares a real vector op against the scalar reference
// implementation from `helpers::lanewise`, using bitwise equality.
macro_rules! float_tests {
{ $vector:ident, $scalar:ident, $int_vector:ident, $int_scalar:ident } => {
#[cfg(test)]
mod $vector {
use super::*;
use helpers::lanewise::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::*;
#[cfg(target_arch = "wasm32")]
wasm_bindgen_test_configure!(run_in_browser);
// TODO impl this as an associated fn on vectors
// Builds a vector from the leading lanes of `slice`.
fn from_slice(slice: &[$scalar]) -> core_simd::$vector {
let mut value = core_simd::$vector::default();
let value_slice: &mut [_] = value.as_mut();
value_slice.copy_from_slice(&slice[0..value_slice.len()]);
value
}
// Splits `slice` into vector-width chunks, one vector per chunk.
fn slice_chunks(slice: &[$scalar]) -> impl Iterator<Item = core_simd::$vector> + '_ {
let lanes = core::mem::size_of::<core_simd::$vector>() / core::mem::size_of::<$scalar>();
slice.chunks_exact(lanes).map(from_slice)
}
fn from_slice_int(slice: &[$int_scalar]) -> core_simd::$int_vector {
let mut value = core_simd::$int_vector::default();
let value_slice: &mut [_] = value.as_mut();
value_slice.copy_from_slice(&slice[0..value_slice.len()]);
value
}
fn slice_chunks_int(slice: &[$int_scalar]) -> impl Iterator<Item = core_simd::$int_vector> + '_ {
let lanes = core::mem::size_of::<core_simd::$int_vector>() / core::mem::size_of::<$int_scalar>();
slice.chunks_exact(lanes).map(from_slice_int)
}
// Fixtures: A and B are ordinary ascending values; C holds edge cases
// (signed zeros, infinities, NaNs, MIN/MAX, EPSILON, non-exact ratios).
const A: [$scalar; 16] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.];
const B: [$scalar; 16] = [16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31.];
const C: [$scalar; 16] = [
-0.0,
0.0,
-1.0,
1.0,
<$scalar>::MIN,
<$scalar>::MAX,
<$scalar>::INFINITY,
<$scalar>::NEG_INFINITY,
<$scalar>::MIN_POSITIVE,
-<$scalar>::MIN_POSITIVE,
<$scalar>::EPSILON,
-<$scalar>::EPSILON,
<$scalar>::NAN,
-<$scalar>::NAN,
// TODO: Would be nice to check sNaN...
100.0 / 3.0,
-100.0 / 3.0,
];
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Add::add);
assert_biteq!(a + b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Add::add);
a += b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_scalar_rhs() {
let a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add);
assert_biteq!(a + b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_scalar_lhs() {
let a = 5.;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Add::add);
assert_biteq!(a + b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_assign_scalar() {
let mut a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add);
a += b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub);
assert_biteq!(a - b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub);
a -= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_scalar_rhs() {
let a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub);
assert_biteq!(a - b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_scalar_lhs() {
let a = 5.;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Sub::sub);
assert_biteq!(a - b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_assign_scalar() {
let mut a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub);
a -= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul);
assert_biteq!(a * b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul);
a *= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_scalar_rhs() {
let a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul);
assert_biteq!(a * b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_scalar_lhs() {
let a = 5.;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Mul::mul);
assert_biteq!(a * b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_assign_scalar() {
let mut a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul);
a *= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Div::div);
assert_biteq!(a / b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Div::div);
a /= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_scalar_rhs() {
let a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div);
assert_biteq!(a / b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_scalar_lhs() {
let a = 5.;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Div::div);
assert_biteq!(a / b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_assign_scalar() {
let mut a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div);
a /= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem);
assert_biteq!(a % b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem);
a %= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_scalar_rhs() {
let a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem);
assert_biteq!(a % b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_scalar_lhs() {
let a = 5.;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Rem::rem);
assert_biteq!(a % b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_assign_scalar() {
let mut a = from_slice(&A);
let b = 5.;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem);
a %= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn neg() {
let v = from_slice(&A);
let expected = apply_unary_lanewise(v, core::ops::Neg::neg);
assert_biteq!(-v, expected);
}
// "odd floats" tests run the op over the C edge-case fixture.
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn neg_odd_floats() {
for v in slice_chunks(&C) {
let expected = apply_unary_lanewise(v, core::ops::Neg::neg);
assert_biteq!(-v, expected);
}
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn abs_negative() {
let v = -from_slice(&A);
let expected = apply_unary_lanewise(v, <$scalar>::abs);
assert_biteq!(v.abs(), expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn abs_positive() {
let v = from_slice(&B);
let expected = apply_unary_lanewise(v, <$scalar>::abs);
assert_biteq!(v.abs(), expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn abs_odd_floats() {
for v in slice_chunks(&C) {
let expected = apply_unary_lanewise(v, <$scalar>::abs);
assert_biteq!(v.abs(), expected);
}
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn ceil_odd_floats() {
for v in slice_chunks(&C) {
let expected = apply_unary_lanewise(v, <$scalar>::ceil);
assert_biteq!(v.ceil(), expected);
}
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn floor_odd_floats() {
for v in slice_chunks(&C) {
let expected = apply_unary_lanewise(v, <$scalar>::floor);
assert_biteq!(v.floor(), expected);
}
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn to_int_unchecked() {
// The maximum integer that can be represented by the equivalently sized float has
// all of the mantissa digits set to 1, pushed up to the MSB.
const ALL_MANTISSA_BITS: $int_scalar = ((1 << <$scalar>::MANTISSA_DIGITS) - 1);
const MAX_REPRESENTABLE_VALUE: $int_scalar =
ALL_MANTISSA_BITS << (core::mem::size_of::<$scalar>() * 8 - <$scalar>::MANTISSA_DIGITS as usize - 1);
// Only values that are in-range for the target int type; out-of-range
// inputs would be UB for `to_int_unchecked`.
const VALUES: [$scalar; 16] = [
-0.0,
0.0,
-1.0,
1.0,
ALL_MANTISSA_BITS as $scalar,
-ALL_MANTISSA_BITS as $scalar,
MAX_REPRESENTABLE_VALUE as $scalar,
-MAX_REPRESENTABLE_VALUE as $scalar,
(MAX_REPRESENTABLE_VALUE / 2) as $scalar,
(-MAX_REPRESENTABLE_VALUE / 2) as $scalar,
<$scalar>::MIN_POSITIVE,
-<$scalar>::MIN_POSITIVE,
<$scalar>::EPSILON,
-<$scalar>::EPSILON,
100.0 / 3.0,
-100.0 / 3.0,
];
for v in slice_chunks(&VALUES) {
let expected = apply_unary_lanewise(v, |x| unsafe { x.to_int_unchecked() });
assert_biteq!(unsafe { v.to_int_unchecked() }, expected);
}
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn round_from_int() {
const VALUES: [$int_scalar; 16] = [
0,
0,
1,
-1,
100,
-100,
200,
-200,
413,
-413,
1017,
-1017,
1234567,
-1234567,
<$int_scalar>::MAX,
<$int_scalar>::MIN,
];
for v in slice_chunks_int(&VALUES) {
let expected = apply_unary_lanewise(v, |x| x as $scalar);
assert_biteq!(core_simd::$vector::round_from_int(v), expected);
}
}
}
}
}

View file

@ -1,4 +0,0 @@
// Instantiates `int_tests!` for each supported i128 vector width.
use super::helpers;
int_tests! { i128x2, i128 }
int_tests! { i128x4, i128 }

View file

@ -1,6 +0,0 @@
// Instantiates `int_tests!` for each supported i16 vector width.
use super::helpers;
int_tests! { i16x4, i16 }
int_tests! { i16x8, i16 }
int_tests! { i16x16, i16 }
int_tests! { i16x32, i16 }

View file

@ -1,6 +0,0 @@
// Instantiates `int_tests!` for each supported i32 vector width.
use super::helpers;
int_tests! { i32x2, i32 }
int_tests! { i32x4, i32 }
int_tests! { i32x8, i32 }
int_tests! { i32x16, i32 }

View file

@ -1,5 +0,0 @@
// Instantiates `int_tests!` for each supported i64 vector width.
use super::helpers;
int_tests! { i64x2, i64 }
int_tests! { i64x4, i64 }
int_tests! { i64x8, i64 }

View file

@ -1,6 +0,0 @@
// Instantiates `int_tests!` for each supported i8 vector width.
use super::helpers;
int_tests! { i8x8, i8 }
int_tests! { i8x16, i8 }
int_tests! { i8x32, i8 }
int_tests! { i8x64, i8 }

View file

@ -1,493 +0,0 @@
// Generates a `#[cfg(test)]` module of lanewise tests for one signed
// integer vector type, covering arithmetic ops (with and without scalar
// operands), bitwise ops, negation, and the overflow/divide-by-zero
// panic behavior of `/` and `%`. Each test compares the vector op
// against the scalar reference implementation from `helpers::lanewise`.
macro_rules! int_tests {
{ $vector:ident, $scalar:ident } => {
#[cfg(test)]
mod $vector {
use super::*;
use helpers::lanewise::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::*;
#[cfg(target_arch = "wasm32")]
wasm_bindgen_test_configure!(run_in_browser);
// TODO impl this as an associated fn on vectors
// Builds a vector from the leading lanes of `slice`.
fn from_slice(slice: &[$scalar]) -> core_simd::$vector {
let mut value = core_simd::$vector::default();
let value_slice: &mut [_] = value.as_mut();
value_slice.copy_from_slice(&slice[0..value_slice.len()]);
value
}
// Fixtures sized for the widest (64-lane) vectors; narrower vectors
// consume only a prefix. B has no zero lanes so `/` and `%` are safe.
const A: [$scalar; 64] = [
7, 7, 7, 7, -7, -7, -7, -7,
6, 6, 6, 6, -6, -6, -6, -6,
5, 5, 5, 5, -5, -5, -5, -5,
4, 4, 4, 4, -4, -4, -4, -4,
3, 3, 3, 3, -3, -3, -3, -3,
2, 2, 2, 2, -2, -2, -2, -2,
1, 1, 1, 1, -1, -1, -1, -1,
0, 0, 0, 0, 0, 0, 0, 0,
];
const B: [$scalar; 64] = [
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
-1, -2, -3, -4, -5, -6, -7, -8,
-1, -2, -3, -4, -5, -6, -7, -8,
-1, -2, -3, -4, -5, -6, -7, -8,
-1, -2, -3, -4, -5, -6, -7, -8,
];
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Add::add);
assert_biteq!(a + b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Add::add);
a += b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add);
assert_biteq!(a + b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Add::add);
assert_biteq!(a + b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn add_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add);
a += b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub);
assert_biteq!(a - b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub);
a -= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub);
assert_biteq!(a - b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Sub::sub);
assert_biteq!(a - b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sub_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub);
a -= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul);
assert_biteq!(a * b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul);
a *= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul);
assert_biteq!(a * b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Mul::mul);
assert_biteq!(a * b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn mul_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul);
a *= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Div::div);
assert_biteq!(a / b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Div::div);
a /= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div);
assert_biteq!(a / b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Div::div);
assert_biteq!(a / b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div);
a /= b;
assert_biteq!(a, expected);
}
// $scalar::MIN / -1 overflows for two's-complement signed ints,
// so vector division must panic.
#[test]
#[should_panic]
fn div_min_panics() {
let a = from_slice(&vec![$scalar::MIN; 64]);
let b = from_slice(&vec![-1; 64]);
let _ = a / b;
}
#[test]
#[should_panic]
fn div_by_all_zeros_panics() {
let a = from_slice(&A);
let b = from_slice(&vec![0 ; 64]);
let _ = a / b;
}
// A single zero lane is enough to make vector division panic.
#[test]
#[should_panic]
fn div_by_one_zero_panics() {
let a = from_slice(&A);
let mut b = from_slice(&B);
b[0] = 0 as _;
let _ = a / b;
}
// Dividing non-MIN values by -1 must NOT panic.
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn div_min_neg_one_no_panic() {
let a = from_slice(&A);
let b = from_slice(&vec![-1; 64]);
let _ = a / b;
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem);
assert_biteq!(a % b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem);
a %= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem);
assert_biteq!(a % b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Rem::rem);
assert_biteq!(a % b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem);
a %= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn rem_min_neg_one_no_panic() {
let a = from_slice(&A);
let b = from_slice(&vec![-1; 64]);
let _ = a % b;
}
// $scalar::MIN % -1 overflows, mirroring `div_min_panics`.
#[test]
#[should_panic]
fn rem_min_panic() {
let a = from_slice(&vec![$scalar::MIN; 64]);
let b = from_slice(&vec![-1 ; 64]);
let _ = a % b;
}
#[test]
#[should_panic]
fn rem_min_zero_panic() {
let a = from_slice(&A);
let b = from_slice(&vec![0 ; 64]);
let _ = a % b;
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitand() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand);
assert_biteq!(a & b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitand_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand);
a &= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitand_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitAnd::bitand);
assert_biteq!(a & b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitand_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitAnd::bitand);
assert_biteq!(a & b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitand_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitAnd::bitand);
a &= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitor() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor);
assert_biteq!(a | b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitor_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor);
a |= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitor_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitOr::bitor);
assert_biteq!(a | b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitor_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitOr::bitor);
assert_biteq!(a | b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitor_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitOr::bitor);
a |= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitxor() {
let a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor);
assert_biteq!(a ^ b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitxor_assign() {
let mut a = from_slice(&A);
let b = from_slice(&B);
let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor);
a ^= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitxor_scalar_rhs() {
let a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitXor::bitxor);
assert_biteq!(a ^ b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitxor_scalar_lhs() {
let a = 5;
let b = from_slice(&B);
let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitXor::bitxor);
assert_biteq!(a ^ b, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn bitxor_assign_scalar() {
let mut a = from_slice(&A);
let b = 5;
let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitXor::bitxor);
a ^= b;
assert_biteq!(a, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn neg() {
let v = from_slice(&A);
let expected = apply_unary_lanewise(v, core::ops::Neg::neg);
assert_biteq!(-v, expected);
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn not() {
let v = from_slice(&A);
let expected = apply_unary_lanewise(v, core::ops::Not::not);
assert_biteq!(!v, expected);
}
}
}
}

View file

@ -1,5 +0,0 @@
// Instantiates `int_tests!` for each supported isize vector width.
use super::helpers;
int_tests! { isizex2, isize }
int_tests! { isizex4, isize }
int_tests! { isizex8, isize }

View file

@ -1,39 +0,0 @@
// Root of the per-type operator tests: pulls in the shared helpers (by
// path, since they live outside this module tree), then the macro
// modules and one instantiation module per scalar/mask type.
#[macro_use]
#[path = "../helpers/mod.rs"]
mod helpers;
#[macro_use]
mod float_macros;
mod r#f32;
mod r#f64;
#[macro_use]
mod int_macros;
mod r#i8;
mod r#i16;
mod r#i32;
mod r#i64;
mod r#i128;
mod r#isize;
#[macro_use]
mod uint_macros;
mod r#u8;
mod r#u16;
mod r#u32;
mod r#u64;
mod r#u128;
mod r#usize;
#[macro_use]
mod mask_macros;
mod mask8;
mod mask16;
mod mask32;
mod mask64;
mod mask128;
mod masksize;

View file

@ -1,4 +0,0 @@
// Instantiates `uint_tests!` for each supported u128 vector width.
use super::helpers;
uint_tests! { u128x2, u128 }
uint_tests! { u128x4, u128 }

View file

@ -1,6 +0,0 @@
// Instantiates `uint_tests!` for each supported u16 vector width.
use super::helpers;
uint_tests! { u16x4, u16 }
uint_tests! { u16x8, u16 }
uint_tests! { u16x16, u16 }
uint_tests! { u16x32, u16 }

View file

@ -1,6 +0,0 @@
// Instantiates `uint_tests!` for each supported u32 vector width.
use super::helpers;
uint_tests! { u32x2, u32 }
uint_tests! { u32x4, u32 }
uint_tests! { u32x8, u32 }
uint_tests! { u32x16, u32 }

View file

@ -1,5 +0,0 @@
// Instantiates `uint_tests!` for each supported u64 vector width.
use super::helpers;
uint_tests! { u64x2, u64 }
uint_tests! { u64x4, u64 }
uint_tests! { u64x8, u64 }

View file

@ -1,6 +0,0 @@
// Instantiates `uint_tests!` for each supported u8 vector width.
use super::helpers;
uint_tests! { u8x8, u8 }
uint_tests! { u8x16, u8 }
uint_tests! { u8x32, u8 }
uint_tests! { u8x64, u8 }

View file

@ -1,428 +0,0 @@
macro_rules! uint_tests {
    // Internal rule: emit the standard battery of tests for one binary
    // operator as a submodule named after it. The five tests cover the
    // vector/vector, vector-assign, vector/scalar, scalar/vector, and
    // scalar-assign forms. `$func` is the scalar reference operation,
    // `$op`/`$op_assign` are the operator tokens, and `$rhs`/`$lhs` are
    // the scalar operands used by the mixed vector/scalar tests.
    //
    // This replaces ~40 hand-copied test functions per operator set with a
    // single parameterized expansion.
    { @binary $mod:ident, $func:path, $op:tt, $op_assign:tt, $rhs:expr, $lhs:expr } => {
        mod $mod {
            use super::*;

            #[test]
            #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
            fn vector() {
                let a = from_slice(&A);
                let b = from_slice(&B);
                let expected = apply_binary_lanewise(a, b, $func);
                assert_biteq!(a $op b, expected);
            }

            #[test]
            #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
            fn vector_assign() {
                let mut a = from_slice(&A);
                let b = from_slice(&B);
                let expected = apply_binary_lanewise(a, b, $func);
                a $op_assign b;
                assert_biteq!(a, expected);
            }

            #[test]
            #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
            fn scalar_rhs() {
                let a = from_slice(&A);
                let b = $rhs;
                let expected = apply_binary_scalar_rhs_lanewise(a, b, $func);
                assert_biteq!(a $op b, expected);
            }

            #[test]
            #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
            fn scalar_lhs() {
                let a = $lhs;
                let b = from_slice(&B);
                let expected = apply_binary_scalar_lhs_lanewise(a, b, $func);
                assert_biteq!(a $op b, expected);
            }

            #[test]
            #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
            fn assign_scalar() {
                let mut a = from_slice(&A);
                let b = $rhs;
                let expected = apply_binary_scalar_rhs_lanewise(a, b, $func);
                a $op_assign b;
                assert_biteq!(a, expected);
            }
        }
    };
    // Public entry point: generate the full test module for one unsigned
    // vector type. `$vector` is the core_simd type name, `$scalar` its lane type.
    { $vector:ident, $scalar:ident } => {
        #[cfg(test)]
        mod $vector {
            use super::*;
            use helpers::lanewise::*;

            #[cfg(target_arch = "wasm32")]
            use wasm_bindgen_test::*;

            #[cfg(target_arch = "wasm32")]
            wasm_bindgen_test_configure!(run_in_browser);

            // TODO impl this as an associated fn on vectors
            fn from_slice(slice: &[$scalar]) -> core_simd::$vector {
                let mut value = core_simd::$vector::default();
                let value_slice: &mut [_] = value.as_mut();
                value_slice.copy_from_slice(&slice[0..value_slice.len()]);
                value
            }

            // Fixed inputs; only the leading `LANES` elements are used.
            const A: [$scalar; 64] = [
                16, 16, 16, 16, 16, 16, 16, 16,
                14, 14, 14, 14, 14, 14, 14, 14,
                12, 12, 12, 12, 12, 12, 12, 12,
                10, 10, 10, 10, 10, 10, 10, 10,
                8, 8, 8, 8, 8, 8, 8, 8,
                6, 6, 6, 6, 6, 6, 7, 8,
                4, 4, 4, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
            ];
            const B: [$scalar; 64] = [
                1, 2, 3, 4, 1, 2, 3, 4,
                1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8,
            ];

            // `sub` uses scalar RHS 1 and a large scalar LHS (40) so that
            // unsigned subtraction cannot underflow with the data above.
            uint_tests! { @binary add, core::ops::Add::add, +, +=, 5, 5 }
            uint_tests! { @binary sub, core::ops::Sub::sub, -, -=, 1, 40 }
            uint_tests! { @binary mul, core::ops::Mul::mul, *, *=, 5, 5 }
            uint_tests! { @binary div, core::ops::Div::div, /, /=, 5, 5 }
            uint_tests! { @binary rem, core::ops::Rem::rem, %, %=, 5, 5 }
            uint_tests! { @binary bitand, core::ops::BitAnd::bitand, &, &=, 5, 5 }
            uint_tests! { @binary bitor, core::ops::BitOr::bitor, |, |=, 5, 5 }
            uint_tests! { @binary bitxor, core::ops::BitXor::bitxor, ^, ^=, 5, 5 }

            // `Not` is the only unary operator for unsigned integers.
            #[test]
            #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
            fn not() {
                let v = from_slice(&A);
                let expected = apply_unary_lanewise(v, core::ops::Not::not);
                assert_biteq!(!v, expected);
            }
        }
    };
}

View file

@ -1,5 +0,0 @@
use super::helpers;

// Instantiate the shared unsigned-integer test suite for each `usize` vector width.
uint_tests! { usizex2, usize }
uint_tests! { usizex4, usize }
uint_tests! { usizex8, usize }

View file

@ -0,0 +1,328 @@
/// Implements a test on a unary operation using proptest.
///
/// Compares the vector operation to the equivalent scalar operation.
#[macro_export]
macro_rules! impl_unary_op_test {
    // Form with an explicit scalar reference implementation.
    { $vec_ty:ty, $scalar_ty:ty, $op_trait:ident :: $op_fn:ident, $scalar_impl:expr } => {
        test_helpers::test_lanes! {
            fn $op_fn<const LANES: usize>() {
                test_helpers::test_unary_elementwise(
                    &<$vec_ty as core::ops::$op_trait>::$op_fn,
                    &$scalar_impl,
                    &|_| true,
                );
            }
        }
    };
    // Default form: the scalar reference is the same trait impl on the scalar type.
    { $vec_ty:ty, $scalar_ty:ty, $op_trait:ident :: $op_fn:ident } => {
        impl_unary_op_test! { $vec_ty, $scalar_ty, $op_trait::$op_fn, <$scalar_ty as core::ops::$op_trait>::$op_fn }
    };
}
/// Implements a test on a binary operation using proptest.
///
/// Compares the vector operation to the equivalent scalar operation.
#[macro_export]
macro_rules! impl_binary_op_test {
    // Form with an explicit scalar reference implementation.
    { $vec_ty:ty, $scalar_ty:ty, $op_trait:ident :: $op_fn:ident, $assign_trait:ident :: $assign_fn:ident, $scalar_impl:expr } => {
        mod $op_fn {
            use super::*;
            test_helpers::test_lanes! {
                // vector op vector
                fn normal<const LANES: usize>() {
                    test_helpers::test_binary_elementwise(
                        &<$vec_ty as core::ops::$op_trait>::$op_fn,
                        &$scalar_impl,
                        &|_, _| true,
                    );
                }
                // vector op scalar
                fn scalar_rhs<const LANES: usize>() {
                    test_helpers::test_binary_scalar_rhs_elementwise(
                        &<$vec_ty as core::ops::$op_trait<$scalar_ty>>::$op_fn,
                        &$scalar_impl,
                        &|_, _| true,
                    );
                }
                // scalar op vector
                fn scalar_lhs<const LANES: usize>() {
                    test_helpers::test_binary_scalar_lhs_elementwise(
                        &<$scalar_ty as core::ops::$op_trait<$vec_ty>>::$op_fn,
                        &$scalar_impl,
                        &|_, _| true,
                    );
                }
                // vector op= vector
                fn assign<const LANES: usize>() {
                    test_helpers::test_binary_elementwise(
                        &|mut a, b| { <$vec_ty as core::ops::$assign_trait>::$assign_fn(&mut a, b); a },
                        &$scalar_impl,
                        &|_, _| true,
                    );
                }
                // vector op= scalar
                fn assign_scalar_rhs<const LANES: usize>() {
                    test_helpers::test_binary_scalar_rhs_elementwise(
                        &|mut a, b| { <$vec_ty as core::ops::$assign_trait<$scalar_ty>>::$assign_fn(&mut a, b); a },
                        &$scalar_impl,
                        &|_, _| true,
                    );
                }
            }
        }
    };
    // Default form: the scalar reference is the same trait impl on the scalar type.
    { $vec_ty:ty, $scalar_ty:ty, $op_trait:ident :: $op_fn:ident, $assign_trait:ident :: $assign_fn:ident } => {
        impl_binary_op_test! { $vec_ty, $scalar_ty, $op_trait::$op_fn, $assign_trait::$assign_fn, <$scalar_ty as core::ops::$op_trait>::$op_fn }
    };
}
/// Implements a test on a binary operation using proptest.
///
/// Like `impl_binary_op_test`, but allows providing a function for rejecting particular inputs
/// (like the `proptest_assume` macro).
///
/// Compares the vector operation to the equivalent scalar operation.
#[macro_export]
macro_rules! impl_binary_checked_op_test {
    { $vector:ty, $scalar:ty, $trait:ident :: $fn:ident, $trait_assign:ident :: $fn_assign:ident, $scalar_fn:expr, $check_fn:expr } => {
        mod $fn {
            use super::*;
            test_helpers::test_lanes! {
                fn normal<const LANES: usize>() {
                    test_helpers::test_binary_elementwise(
                        &<$vector as core::ops::$trait>::$fn,
                        &$scalar_fn,
                        // Accept only inputs where every lane pair passes the check.
                        &|x, y| x.iter().zip(y.iter()).all(|(x, y)| $check_fn(*x, *y)),
                    );
                }
                fn scalar_rhs<const LANES: usize>() {
                    test_helpers::test_binary_scalar_rhs_elementwise(
                        &<$vector as core::ops::$trait<$scalar>>::$fn,
                        &$scalar_fn,
                        // Every lane is paired with the same scalar RHS.
                        &|x, y| x.iter().all(|x| $check_fn(*x, y)),
                    );
                }
                fn scalar_lhs<const LANES: usize>() {
                    test_helpers::test_binary_scalar_lhs_elementwise(
                        &<$scalar as core::ops::$trait<$vector>>::$fn,
                        &$scalar_fn,
                        // Every lane is paired with the same scalar LHS.
                        &|x, y| y.iter().all(|y| $check_fn(x, *y)),
                    );
                }
                fn assign<const LANES: usize>() {
                    test_helpers::test_binary_elementwise(
                        &|mut a, b| { <$vector as core::ops::$trait_assign>::$fn_assign(&mut a, b); a },
                        &$scalar_fn,
                        &|x, y| x.iter().zip(y.iter()).all(|(x, y)| $check_fn(*x, *y)),
                    );
                }
                fn assign_scalar_rhs<const LANES: usize>() {
                    test_helpers::test_binary_scalar_rhs_elementwise(
                        &|mut a, b| { <$vector as core::ops::$trait_assign<$scalar>>::$fn_assign(&mut a, b); a },
                        &$scalar_fn,
                        &|x, y| x.iter().all(|x| $check_fn(*x, y)),
                    );
                }
            }
        }
    };
    // Default form: the scalar reference is the same trait impl on the scalar type.
    //
    // BUG FIX: this arm previously delegated to a nonexistent
    // `impl_binary_nonzero_rhs_op_test!` macro, so any invocation omitting the
    // scalar function failed to expand. It now recurses into this macro.
    { $vector:ty, $scalar:ty, $trait:ident :: $fn:ident, $trait_assign:ident :: $fn_assign:ident, $check_fn:expr } => {
        impl_binary_checked_op_test! { $vector, $scalar, $trait::$fn, $trait_assign::$fn_assign, <$scalar as core::ops::$trait>::$fn, $check_fn }
    };
}
/// Implement tests for signed integers.
///
/// `$vector` is the generic core_simd vector type (e.g. `SimdI32`) and
/// `$scalar` its lane type; the tests are emitted into a module named after
/// the scalar type.
#[macro_export]
macro_rules! impl_signed_tests {
    { $vector:ident, $scalar:tt } => {
        mod $scalar {
            // Aliases so the generated tests read uniformly across element types.
            type Vector<const LANES: usize> = core_simd::$vector<LANES>;
            type Scalar = $scalar;

            test_helpers::test_lanes! {
                fn neg<const LANES: usize>() {
                    test_helpers::test_unary_elementwise(
                        &<Vector<LANES> as core::ops::Neg>::neg,
                        &<Scalar as core::ops::Neg>::neg,
                        // `-Scalar::MIN` overflows, so reject arrays containing it.
                        &|x| !x.contains(&Scalar::MIN),
                    );
                }
            }

            // Division/remainder cases that must panic, mirroring scalar
            // integer overflow and divide-by-zero behavior.
            test_helpers::test_lanes_panic! {
                fn div_min_overflow_panics<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(Scalar::MIN);
                    let b = Vector::<LANES>::splat(-1);
                    let _ = a / b;
                }

                fn div_by_all_zeros_panics<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(42);
                    let b = Vector::<LANES>::splat(0);
                    let _ = a / b;
                }

                // A single zero lane must be enough to trigger the panic.
                fn div_by_one_zero_panics<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(42);
                    let mut b = Vector::<LANES>::splat(21);
                    b[0] = 0 as _;
                    let _ = a / b;
                }

                fn rem_min_overflow_panic<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(Scalar::MIN);
                    let b = Vector::<LANES>::splat(-1);
                    let _ = a % b;
                }

                fn rem_zero_panic<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(42);
                    let b = Vector::<LANES>::splat(0);
                    let _ = a % b;
                }
            }

            // Dividing by -1 only overflows for `Scalar::MIN`; these must not panic.
            test_helpers::test_lanes! {
                fn div_neg_one_no_panic<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(42);
                    let b = Vector::<LANES>::splat(-1);
                    let _ = a / b;
                }

                fn rem_neg_one_no_panic<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(42);
                    let b = Vector::<LANES>::splat(-1);
                    let _ = a % b;
                }
            }

            // Arithmetic ops are compared against the scalar wrapping versions.
            impl_binary_op_test!(Vector<LANES>, Scalar, Add::add, AddAssign::add_assign, Scalar::wrapping_add);
            impl_binary_op_test!(Vector<LANES>, Scalar, Sub::sub, SubAssign::sub_assign, Scalar::wrapping_sub);
            impl_binary_op_test!(Vector<LANES>, Scalar, Mul::mul, MulAssign::mul_assign, Scalar::wrapping_mul);

            // Exclude Div and Rem panicking cases
            impl_binary_checked_op_test!(Vector<LANES>, Scalar, Div::div, DivAssign::div_assign, Scalar::wrapping_div, |x, y| y != 0 && !(x == Scalar::MIN && y == -1));
            impl_binary_checked_op_test!(Vector<LANES>, Scalar, Rem::rem, RemAssign::rem_assign, Scalar::wrapping_rem, |x, y| y != 0 && !(x == Scalar::MIN && y == -1));

            // Bitwise ops match the plain scalar trait impls exactly.
            impl_unary_op_test!(Vector<LANES>, Scalar, Not::not);
            impl_binary_op_test!(Vector<LANES>, Scalar, BitAnd::bitand, BitAndAssign::bitand_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, BitOr::bitor, BitOrAssign::bitor_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, BitXor::bitxor, BitXorAssign::bitxor_assign);
        }
    }
}
/// Implement tests for unsigned integers.
///
/// `$vector` is the generic core_simd vector type (e.g. `SimdU32`) and
/// `$scalar` its lane type; the tests are emitted into a module named after
/// the scalar type.
#[macro_export]
macro_rules! impl_unsigned_tests {
    { $vector:ident, $scalar:tt } => {
        mod $scalar {
            // Aliases so the generated tests read uniformly across element types.
            type Vector<const LANES: usize> = core_simd::$vector<LANES>;
            type Scalar = $scalar;

            // Remainder by an all-zero vector must panic, as for scalars.
            test_helpers::test_lanes_panic! {
                fn rem_zero_panic<const LANES: usize>() {
                    let a = Vector::<LANES>::splat(42);
                    let b = Vector::<LANES>::splat(0);
                    let _ = a % b;
                }
            }

            // Arithmetic ops are compared against the scalar wrapping versions.
            impl_binary_op_test!(Vector<LANES>, Scalar, Add::add, AddAssign::add_assign, Scalar::wrapping_add);
            impl_binary_op_test!(Vector<LANES>, Scalar, Sub::sub, SubAssign::sub_assign, Scalar::wrapping_sub);
            impl_binary_op_test!(Vector<LANES>, Scalar, Mul::mul, MulAssign::mul_assign, Scalar::wrapping_mul);

            // Exclude Div and Rem panicking cases
            impl_binary_checked_op_test!(Vector<LANES>, Scalar, Div::div, DivAssign::div_assign, Scalar::wrapping_div, |_, y| y != 0);
            impl_binary_checked_op_test!(Vector<LANES>, Scalar, Rem::rem, RemAssign::rem_assign, Scalar::wrapping_rem, |_, y| y != 0);

            // Bitwise ops match the plain scalar trait impls exactly.
            impl_unary_op_test!(Vector<LANES>, Scalar, Not::not);
            impl_binary_op_test!(Vector<LANES>, Scalar, BitAnd::bitand, BitAndAssign::bitand_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, BitOr::bitor, BitOrAssign::bitor_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, BitXor::bitxor, BitXorAssign::bitxor_assign);
        }
    }
}
/// Implement tests for floating point numbers.
///
/// `$vector` is the generic core_simd vector type (e.g. `SimdF32`), `$scalar`
/// its lane type, and `$int_scalar` the same-width integer type used by the
/// int-conversion tests.
#[macro_export]
macro_rules! impl_float_tests {
    { $vector:ident, $scalar:tt, $int_scalar:tt } => {
        mod $scalar {
            // Aliases so the generated tests read uniformly across element types.
            type Vector<const LANES: usize> = core_simd::$vector<LANES>;
            type Scalar = $scalar;
            type IntScalar = $int_scalar;

            // Arithmetic ops are compared directly against the scalar trait impls.
            impl_unary_op_test!(Vector<LANES>, Scalar, Neg::neg);
            impl_binary_op_test!(Vector<LANES>, Scalar, Add::add, AddAssign::add_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, Sub::sub, SubAssign::sub_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, Mul::mul, MulAssign::mul_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, Div::div, DivAssign::div_assign);
            impl_binary_op_test!(Vector<LANES>, Scalar, Rem::rem, RemAssign::rem_assign);

            test_helpers::test_lanes! {
                fn abs<const LANES: usize>() {
                    test_helpers::test_unary_elementwise(
                        &Vector::<LANES>::abs,
                        &Scalar::abs,
                        &|_| true,
                    )
                }

                fn ceil<const LANES: usize>() {
                    test_helpers::test_unary_elementwise(
                        &Vector::<LANES>::ceil,
                        &Scalar::ceil,
                        &|_| true,
                    )
                }

                fn floor<const LANES: usize>() {
                    test_helpers::test_unary_elementwise(
                        &Vector::<LANES>::floor,
                        &Scalar::floor,
                        &|_| true,
                    )
                }

                // int -> float conversion, compared against scalar `as` casts.
                fn round_from_int<const LANES: usize>() {
                    test_helpers::test_unary_elementwise(
                        &Vector::<LANES>::round_from_int,
                        &|x| x as Scalar,
                        &|_| true,
                    )
                }

                fn to_int_unchecked<const LANES: usize>() {
                    // The maximum integer that can be represented by the equivalently sized float has
                    // all of the mantissa digits set to 1, pushed up to the MSB.
                    const ALL_MANTISSA_BITS: IntScalar = ((1 << <Scalar>::MANTISSA_DIGITS) - 1);
                    const MAX_REPRESENTABLE_VALUE: Scalar =
                        (ALL_MANTISSA_BITS << (core::mem::size_of::<Scalar>() * 8 - <Scalar>::MANTISSA_DIGITS as usize - 1)) as Scalar;

                    // A custom runner is used here (instead of the default
                    // "any value" strategy) so inputs stay within the range
                    // where the unchecked conversion is defined.
                    let mut runner = proptest::test_runner::TestRunner::default();
                    runner.run(
                        &test_helpers::array::UniformArrayStrategy::new(-MAX_REPRESENTABLE_VALUE..MAX_REPRESENTABLE_VALUE),
                        |x| {
                            // Inputs are constrained to the representable
                            // range above, so the unchecked conversions here
                            // are defined for every lane.
                            let result_1 = unsafe { Vector::from_array(x).to_int_unchecked().to_array() };
                            let result_2 = {
                                let mut result = [0; LANES];
                                for (i, o) in x.iter().zip(result.iter_mut()) {
                                    *o = unsafe { i.to_int_unchecked() };
                                }
                                result
                            };
                            test_helpers::prop_assert_biteq!(result_1, result_2);
                            Ok(())
                        },
                    ).unwrap();
                }
            }
        }
    }
}

View file

@ -0,0 +1,3 @@
#[macro_use]
mod ops_macros;

// Instantiate the shared unsigned-integer operator tests for `SimdU128`.
impl_unsigned_tests! { SimdU128, u128 }

View file

@ -0,0 +1,3 @@
#[macro_use]
mod ops_macros;

// Instantiate the shared unsigned-integer operator tests for `SimdU16`.
impl_unsigned_tests! { SimdU16, u16 }

View file

@ -0,0 +1,3 @@
#[macro_use]
mod ops_macros;

// Instantiate the shared unsigned-integer operator tests for `SimdU32`.
impl_unsigned_tests! { SimdU32, u32 }

View file

@ -0,0 +1,3 @@
#[macro_use]
mod ops_macros;

// Instantiate the shared unsigned-integer operator tests for `SimdU64`.
impl_unsigned_tests! { SimdU64, u64 }

View file

@ -0,0 +1,3 @@
#[macro_use]
mod ops_macros;

// Instantiate the shared unsigned-integer operator tests for `SimdU8`.
impl_unsigned_tests! { SimdU8, u8 }

View file

@ -0,0 +1,3 @@
#[macro_use]
mod ops_macros;

// Instantiate the shared unsigned-integer operator tests for `SimdUsize`.
impl_unsigned_tests! { SimdUsize, usize }

View file

@ -0,0 +1,11 @@
# Internal crate of shared test utilities; `publish = false` keeps it off crates.io.
[package]
name = "test_helpers"
version = "0.1.0"
authors = ["Caleb Zulawski <caleb.zulawski@gmail.com>"]
edition = "2018"
publish = false

# Proptest with default features disabled, keeping only "alloc".
# NOTE(review): presumably this trims proptest for constrained targets such
# as wasm — confirm against CI configuration.
[dependencies.proptest]
version = "0.10"
default-features = false
features = ["alloc"]

View file

@ -0,0 +1,100 @@
//! Generic-length array strategy.
// Adapted from proptest's array code
// Copyright 2017 Jason Lingle
use proptest::{
strategy::{NewTree, Strategy, ValueTree},
test_runner::TestRunner,
};
use core::{
marker::PhantomData,
mem::MaybeUninit,
};
/// Strategy that generates `[T; LANES]` arrays by applying a single element
/// strategy uniformly to every lane.
#[must_use = "strategies do nothing unless used"]
#[derive(Clone, Copy, Debug)]
pub struct UniformArrayStrategy<S, T> {
    // Strategy used to generate each element.
    strategy: S,
    // Ties the array type `T` to this strategy without storing a value of it.
    _marker: PhantomData<T>,
}
impl<S, T> UniformArrayStrategy<S, T> {
    /// Wraps an element strategy, yielding a strategy for the array type `T`.
    pub const fn new(strategy: S) -> Self {
        Self { strategy, _marker: PhantomData }
    }
}
/// Value tree for `UniformArrayStrategy`: holds one element tree per lane and
/// shrinks the lanes one at a time.
pub struct ArrayValueTree<T> {
    // Per-lane value trees (an array of element trees in practice).
    tree: T,
    // Index of the lane currently being simplified.
    shrinker: usize,
    // Most recent lane that successfully simplified, if any; used by
    // `complicate` to back out the last simplification step.
    last_shrinker: Option<usize>,
}
impl<T, S, const LANES: usize> Strategy for UniformArrayStrategy<S, [T; LANES]>
where
    T: core::fmt::Debug,
    S: Strategy<Value = T>,
{
    type Tree = ArrayValueTree<[S::Tree; LANES]>;
    type Value = [T; LANES];

    // Builds one element tree per lane. `unsafe` is used to construct the
    // array without requiring `S::Tree: Default`.
    fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
        let tree: [S::Tree; LANES] = unsafe {
            // SAFETY: an array of `MaybeUninit` is valid without its elements
            // being initialized.
            let mut tree: [MaybeUninit<S::Tree>; LANES] = MaybeUninit::uninit().assume_init();
            for t in tree.iter_mut() {
                *t = MaybeUninit::new(self.strategy.new_tree(runner)?)
            }
            // SAFETY: every element was initialized by the loop above.
            // NOTE(review): if `new_tree` fails partway, the `?` returns early
            // and the already-initialized elements are leaked (not dropped) —
            // a leak, not UB. Confirm whether that is acceptable here.
            core::mem::transmute_copy(&tree)
        };
        Ok(ArrayValueTree {
            tree,
            shrinker: 0,
            last_shrinker: None,
        })
    }
}
impl<T: ValueTree, const LANES: usize> ValueTree for ArrayValueTree<[T; LANES]> {
    type Value = [T::Value; LANES];

    // Collects the current value of every lane's tree into an array.
    fn current(&self) -> Self::Value {
        unsafe {
            // SAFETY: an array of `MaybeUninit` is valid uninitialized, and
            // every element is written before `transmute_copy` reads it.
            let mut value: [MaybeUninit<T::Value>; LANES] = MaybeUninit::uninit().assume_init();
            for (tree_elem, value_elem) in self.tree.iter().zip(value.iter_mut()) {
                *value_elem = MaybeUninit::new(tree_elem.current());
            }
            core::mem::transmute_copy(&value)
        }
    }

    // Simplifies lanes left to right, stopping at the first lane that still
    // simplifies; returns `false` once every lane is fully simplified.
    fn simplify(&mut self) -> bool {
        while self.shrinker < LANES {
            if self.tree[self.shrinker].simplify() {
                // Remember which lane changed so `complicate` can undo it.
                self.last_shrinker = Some(self.shrinker);
                return true;
            } else {
                self.shrinker += 1;
            }
        }
        false
    }

    // Re-complicates the last lane that was simplified, if any.
    fn complicate(&mut self) -> bool {
        if let Some(shrinker) = self.last_shrinker {
            // Resume future simplification from the lane being complicated.
            self.shrinker = shrinker;
            if self.tree[shrinker].complicate() {
                true
            } else {
                // This lane cannot be complicated further; nothing left to undo.
                self.last_shrinker = None;
                false
            }
        } else {
            false
        }
    }
}

View file

@ -0,0 +1,96 @@
//! Compare numeric types by exact bit value.
/// Equality by exact bit pattern rather than `PartialEq` semantics, so tests
/// can treat NaNs as equal (see the float impls below) while still
/// distinguishing values like `-0.0` and `+0.0`.
pub trait BitEq {
    /// Returns `true` if `self` and `other` are bit-equivalent.
    fn biteq(&self, other: &Self) -> bool;
    /// Formats the value in a way that exposes its bits (e.g. with hex).
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result;
}
/// Implement `BitEq` for integer types, where bit equality is simply `==`.
macro_rules! impl_integer_biteq {
    { $($int:ty),* } => {
        $(
            impl BitEq for $int {
                fn biteq(&self, other: &Self) -> bool {
                    *self == *other
                }

                fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                    // Show both decimal and hex, since mismatches are bit-level.
                    write!(f, "{:?} ({:x})", self, self)
                }
            }
        )*
    };
}

impl_integer_biteq! { u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize }
/// Implement `BitEq` for floating-point types by comparing raw bit patterns,
/// except that any two NaNs are considered equal regardless of payload.
macro_rules! impl_float_biteq {
    { $($float:ty),* } => {
        $(
            impl BitEq for $float {
                fn biteq(&self, other: &Self) -> bool {
                    // Exact NaN bits don't matter: all NaNs compare equal.
                    (self.is_nan() && other.is_nan()) || self.to_bits() == other.to_bits()
                }

                fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                    // Show the value alongside its raw bit pattern.
                    write!(f, "{:?} ({:x})", self, self.to_bits())
                }
            }
        )*
    };
}

impl_float_biteq! { f32, f64 }
impl<T: BitEq, const N: usize> BitEq for [T; N] {
    /// Arrays are bit-equal when every element pair is bit-equal.
    fn biteq(&self, other: &Self) -> bool {
        // `all` short-circuits on the first mismatching element; the previous
        // `fold(true, …)` always scanned the entire array.
        self.iter()
            .zip(other.iter())
            .all(|(left, right)| left.biteq(right))
    }

    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Adapter routing `Debug` through `BitEq::fmt`, so `debug_list` can
        // render every element in its bit-level form.
        #[repr(transparent)]
        struct Wrapper<'a, T: BitEq>(&'a T);

        impl<T: BitEq> core::fmt::Debug for Wrapper<'_, T> {
            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                self.0.fmt(f)
            }
        }

        f.debug_list()
            .entries(self.iter().map(|x| Wrapper(x)))
            .finish()
    }
}
// Wrapper that routes `PartialEq` and `Debug` through `BitEq`, so standard
// assertion macros (e.g. `prop_assert_eq!`) compare and print values at the
// bit level.
#[doc(hidden)]
pub struct BitEqWrapper<'a, T>(pub &'a T);

impl<T: BitEq> PartialEq for BitEqWrapper<'_, T> {
    fn eq(&self, other: &Self) -> bool {
        self.0.biteq(other.0)
    }
}

impl<T: BitEq> core::fmt::Debug for BitEqWrapper<'_, T> {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        self.0.fmt(f)
    }
}
/// Proptest-style assertion that two values are bit-equal, rendering both in
/// bit-level form on failure.
#[macro_export]
macro_rules! prop_assert_biteq {
    { $left:expr, $right:expr } => {
        {
            use $crate::biteq::BitEqWrapper;
            let left = $left;
            let right = $right;
            proptest::prop_assert_eq!(BitEqWrapper(&left), BitEqWrapper(&right));
        }
    }
}

View file

@ -0,0 +1,385 @@
pub mod array;

// wasm needs its own 128-bit integer strategies (see the impls below).
#[cfg(target_arch = "wasm32")]
pub mod wasm;

#[macro_use]
pub mod biteq;

/// Specifies the default strategy for testing a type.
///
/// This strategy should be what "makes sense" to test.
pub trait DefaultStrategy {
    /// Proptest strategy type that produces values of `Self`.
    type Strategy: proptest::strategy::Strategy<Value = Self>;
    /// Returns the default strategy instance for `Self`.
    fn default_strategy() -> Self::Strategy;
}
/// Implement `DefaultStrategy` for a primitive numeric type using proptest's
/// "any value" strategy for that type.
macro_rules! impl_num {
    { $num:tt } => {
        impl DefaultStrategy for $num {
            type Strategy = proptest::num::$num::Any;
            fn default_strategy() -> Self::Strategy {
                proptest::num::$num::ANY
            }
        }
    }
}
// Primitive numeric types all use proptest's `ANY` strategy. The 128-bit
// integer types are handled separately below with a wasm-specific fallback.
impl_num! { i8 }
impl_num! { i16 }
impl_num! { i32 }
impl_num! { i64 }
impl_num! { isize }
impl_num! { u8 }
impl_num! { u16 }
impl_num! { u32 }
impl_num! { u64 }
impl_num! { usize }
impl_num! { f32 }
impl_num! { f64 }
// 128-bit integers: on non-wasm targets, use proptest's strategies directly.
#[cfg(not(target_arch = "wasm32"))]
impl DefaultStrategy for u128 {
    type Strategy = proptest::num::u128::Any;
    fn default_strategy() -> Self::Strategy {
        proptest::num::u128::ANY
    }
}

#[cfg(not(target_arch = "wasm32"))]
impl DefaultStrategy for i128 {
    type Strategy = proptest::num::i128::Any;
    fn default_strategy() -> Self::Strategy {
        proptest::num::i128::ANY
    }
}
// On wasm, substitute the crate-local 128-bit strategies from `crate::wasm`
// in place of proptest's.
#[cfg(target_arch = "wasm32")]
impl DefaultStrategy for u128 {
    type Strategy = crate::wasm::u128::Any;
    fn default_strategy() -> Self::Strategy {
        crate::wasm::u128::ANY
    }
}

#[cfg(target_arch = "wasm32")]
impl DefaultStrategy for i128 {
    type Strategy = crate::wasm::i128::Any;
    fn default_strategy() -> Self::Strategy {
        crate::wasm::i128::ANY
    }
}
// Arrays of a testable element type generate every lane using the element's
// default strategy.
impl<T: core::fmt::Debug + DefaultStrategy, const LANES: usize> DefaultStrategy for [T; LANES] {
    type Strategy = crate::array::UniformArrayStrategy<T::Strategy, Self>;
    fn default_strategy() -> Self::Strategy {
        Self::Strategy::new(T::default_strategy())
    }
}
/// Test a function that takes a single value.
pub fn test_1<A: core::fmt::Debug + DefaultStrategy>(
    f: &dyn Fn(A) -> proptest::test_runner::TestCaseResult,
) {
    // Drive `f` with the type's default strategy; panic on any failing case.
    proptest::test_runner::TestRunner::default()
        .run(&A::default_strategy(), f)
        .unwrap();
}
/// Test a function that takes two values.
pub fn test_2<A: core::fmt::Debug + DefaultStrategy, B: core::fmt::Debug + DefaultStrategy>(
    f: &dyn Fn(A, B) -> proptest::test_runner::TestCaseResult,
) {
    // Pair the two default strategies; proptest yields the values as a tuple.
    let strategy = (A::default_strategy(), B::default_strategy());
    let mut runner = proptest::test_runner::TestRunner::default();
    runner.run(&strategy, |(a, b)| f(a, b)).unwrap();
}
/// Test a unary vector function against a unary scalar function, applied elementwise.
#[inline(never)]
pub fn test_unary_elementwise<Scalar, ScalarResult, Vector, VectorResult, const LANES: usize>(
    fv: &dyn Fn(Vector) -> VectorResult,
    fs: &dyn Fn(Scalar) -> ScalarResult,
    check: &dyn Fn([Scalar; LANES]) -> bool,
) where
    Scalar: Copy + Default + core::fmt::Debug + DefaultStrategy,
    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
    Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
    VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
{
    test_1(&|x: [Scalar; LANES]| {
        // Skip any inputs the caller has rejected.
        proptest::prop_assume!(check(x));
        // Vector path: run the whole array through the vector op at once.
        let vector_result: [ScalarResult; LANES] = fv(x.into()).into();
        // Scalar path: run each lane through the scalar op individually.
        let mut scalar_result = [ScalarResult::default(); LANES];
        for (lane_in, lane_out) in x.iter().zip(scalar_result.iter_mut()) {
            *lane_out = fs(*lane_in);
        }
        crate::prop_assert_biteq!(vector_result, scalar_result);
        Ok(())
    });
}
/// Test a binary vector function against a binary scalar function, applied elementwise.
#[inline(never)]
pub fn test_binary_elementwise<
    Scalar1,
    Scalar2,
    ScalarResult,
    Vector1,
    Vector2,
    VectorResult,
    const LANES: usize,
>(
    fv: &dyn Fn(Vector1, Vector2) -> VectorResult,
    fs: &dyn Fn(Scalar1, Scalar2) -> ScalarResult,
    check: &dyn Fn([Scalar1; LANES], [Scalar2; LANES]) -> bool,
) where
    Scalar1: Copy + Default + core::fmt::Debug + DefaultStrategy,
    Scalar2: Copy + Default + core::fmt::Debug + DefaultStrategy,
    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
    Vector1: Into<[Scalar1; LANES]> + From<[Scalar1; LANES]> + Copy,
    Vector2: Into<[Scalar2; LANES]> + From<[Scalar2; LANES]> + Copy,
    VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
{
    test_2(&|x: [Scalar1; LANES], y: [Scalar2; LANES]| {
        // Skip any input pairs the caller has rejected.
        proptest::prop_assume!(check(x, y));
        // Vector path: one shot over all lanes.
        let vector_result: [ScalarResult; LANES] = fv(x.into(), y.into()).into();
        // Scalar path: lane by lane.
        let mut scalar_result = [ScalarResult::default(); LANES];
        for ((a, b), out) in x.iter().zip(y.iter()).zip(scalar_result.iter_mut()) {
            *out = fs(*a, *b);
        }
        crate::prop_assert_biteq!(vector_result, scalar_result);
        Ok(())
    });
}
/// Test a binary vector-scalar function against a binary scalar function, applied elementwise.
#[inline(never)]
pub fn test_binary_scalar_rhs_elementwise<
    Scalar1,
    Scalar2,
    ScalarResult,
    Vector,
    VectorResult,
    const LANES: usize,
>(
    fv: &dyn Fn(Vector, Scalar2) -> VectorResult,
    fs: &dyn Fn(Scalar1, Scalar2) -> ScalarResult,
    check: &dyn Fn([Scalar1; LANES], Scalar2) -> bool,
) where
    Scalar1: Copy + Default + core::fmt::Debug + DefaultStrategy,
    Scalar2: Copy + Default + core::fmt::Debug + DefaultStrategy,
    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
    Vector: Into<[Scalar1; LANES]> + From<[Scalar1; LANES]> + Copy,
    VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
{
    test_2(&|x: [Scalar1; LANES], y: Scalar2| {
        // Skip any inputs the caller has rejected.
        proptest::prop_assume!(check(x, y));
        // Vector path: the scalar RHS is applied across the whole vector.
        let vector_result: [ScalarResult; LANES] = fv(x.into(), y).into();
        // Scalar path: pair each lane with the same scalar RHS.
        let mut scalar_result = [ScalarResult::default(); LANES];
        for (lane, out) in x.iter().zip(scalar_result.iter_mut()) {
            *out = fs(*lane, y);
        }
        crate::prop_assert_biteq!(vector_result, scalar_result);
        Ok(())
    });
}
/// Test a binary vector-scalar function against a binary scalar function, applied elementwise.
#[inline(never)]
pub fn test_binary_scalar_lhs_elementwise<
    Scalar1,
    Scalar2,
    ScalarResult,
    Vector,
    VectorResult,
    const LANES: usize,
>(
    fv: &dyn Fn(Scalar1, Vector) -> VectorResult,
    fs: &dyn Fn(Scalar1, Scalar2) -> ScalarResult,
    check: &dyn Fn(Scalar1, [Scalar2; LANES]) -> bool,
) where
    Scalar1: Copy + Default + core::fmt::Debug + DefaultStrategy,
    Scalar2: Copy + Default + core::fmt::Debug + DefaultStrategy,
    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
    Vector: Into<[Scalar2; LANES]> + From<[Scalar2; LANES]> + Copy,
    VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
{
    test_2(&|x: Scalar1, y: [Scalar2; LANES]| {
        // Skip any inputs the caller has rejected.
        proptest::prop_assume!(check(x, y));
        // Vector path: the scalar LHS is applied across the whole vector.
        let vector_result: [ScalarResult; LANES] = fv(x, y.into()).into();
        // Scalar path: pair the same scalar LHS with each lane.
        let mut scalar_result = [ScalarResult::default(); LANES];
        for (lane, out) in y.iter().zip(scalar_result.iter_mut()) {
            *out = fs(x, *lane);
        }
        crate::prop_assert_biteq!(vector_result, scalar_result);
        Ok(())
    });
}
/// Expand a const-generic test into separate tests for each possible lane count.
#[macro_export]
macro_rules! test_lanes {
    {
        $(fn $test:ident<const $lanes:ident: usize>() $body:tt)*
    } => {
        $(
            mod $test {
                use super::*;

                // The test body, generic over the lane count. The where
                // clauses require every vector type at this lane count so
                // test bodies don't need to repeat the bounds themselves.
                // (`$body` follows directly after the final bound's comma.)
                fn implementation<const $lanes: usize>()
                where
                    core_simd::SimdU8<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU16<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU32<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU64<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU128<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI8<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI16<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI32<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI64<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI128<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdF32<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdF64<$lanes>: core_simd::LanesAtMost64,
                    core_simd::BitMask<$lanes>: core_simd::LanesAtMost64,
                $body

                #[cfg(target_arch = "wasm32")]
                wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);

                // One concrete #[test] per supported power-of-two lane count.
                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_1() {
                    implementation::<1>();
                }

                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_2() {
                    implementation::<2>();
                }

                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_4() {
                    implementation::<4>();
                }

                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_8() {
                    implementation::<8>();
                }

                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_16() {
                    implementation::<16>();
                }

                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_32() {
                    implementation::<32>();
                }

                #[test]
                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
                fn lanes_64() {
                    implementation::<64>();
                }
            }
        )*
    }
}
/// Expand a const-generic `#[should_panic]` test into separate tests for each possible lane count.
#[macro_export]
macro_rules! test_lanes_panic {
    {
        // Same input grammar as `test_lanes!`: any number of test functions
        // generic over a lane-count const parameter.
        $(fn $test:ident<const $lanes:ident: usize>() $body:tt)*
    } => {
        $(
            // One module per test function, holding its per-lane-count tests.
            mod $test {
                use super::*;

                // Shared implementation; the bounds make every vector type
                // available to `$body` at this lane count.
                fn implementation<const $lanes: usize>()
                where
                    core_simd::SimdU8<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU16<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU32<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU64<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdU128<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdUsize<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI8<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI16<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI32<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI64<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdI128<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdIsize<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdF32<$lanes>: core_simd::LanesAtMost64,
                    core_simd::SimdF64<$lanes>: core_simd::LanesAtMost64,
                    core_simd::BitMask<$lanes>: core_simd::LanesAtMost64,
                // NOTE: `$body` is the function's `{ ... }` block; it follows
                // the trailing comma of the final bound directly.
                $body

                // Unlike `test_lanes!`, each expansion must panic to pass.
                // No wasm wrappers here: `#[should_panic]` is not supported by
                // wasm_bindgen_test in this form.
                #[test]
                #[should_panic]
                fn lanes_1() {
                    implementation::<1>();
                }

                #[test]
                #[should_panic]
                fn lanes_2() {
                    implementation::<2>();
                }

                #[test]
                #[should_panic]
                fn lanes_4() {
                    implementation::<4>();
                }

                #[test]
                #[should_panic]
                fn lanes_8() {
                    implementation::<8>();
                }

                #[test]
                #[should_panic]
                fn lanes_16() {
                    implementation::<16>();
                }

                #[test]
                #[should_panic]
                fn lanes_32() {
                    implementation::<32>();
                }

                #[test]
                #[should_panic]
                fn lanes_64() {
                    implementation::<64>();
                }
            }
        )*
    }
}

View file

@ -0,0 +1,51 @@
//! Strategies for `u128` and `i128`, since proptest doesn't provide them for the wasm target.

/// Emit a module exposing a proptest strategy for the named 128-bit integer
/// type, built by generating two independent `u64` halves and gluing them
/// together.  Shrinking delegates to the inner `[u64; 2]` strategy.
macro_rules! impl_num {
    { $name:ident } => {
        pub(crate) mod $name {
            use proptest::strategy::{Strategy, ValueTree, NewTree};

            /// Underlying strategy: a uniform pair of `u64`s covering all 128 bits.
            type InnerStrategy = crate::array::UniformArrayStrategy<proptest::num::u64::Any, [u64; 2]>;

            /// Strategy producing any value of the target type.
            #[must_use = "strategies do nothing unless used"]
            #[derive(Clone, Copy, Debug)]
            pub struct Any {
                strategy: InnerStrategy,
            }

            /// Value tree that shrinks by delegating to the inner `[u64; 2]` tree.
            pub struct BinarySearch {
                inner: <InnerStrategy as Strategy>::Tree,
            }

            impl ValueTree for BinarySearch {
                type Value = $name;

                fn current(&self) -> $name {
                    // Assemble the halves into one 128-bit value: `halves[0]`
                    // is the low word.  This safe shift-and-or replaces an
                    // endianness-dependent `transmute` of `[u64; 2]` and needs
                    // no `unsafe`.  (`u64 as i128` zero-extends, so both the
                    // signed and unsigned expansions are exact.)
                    let halves = self.inner.current();
                    ((halves[1] as $name) << 64) | (halves[0] as $name)
                }

                fn simplify(&mut self) -> bool {
                    self.inner.simplify()
                }

                fn complicate(&mut self) -> bool {
                    self.inner.complicate()
                }
            }

            impl Strategy for Any {
                type Tree = BinarySearch;
                type Value = $name;

                fn new_tree(&self, runner: &mut proptest::test_runner::TestRunner) -> NewTree<Self> {
                    Ok(BinarySearch { inner: self.strategy.new_tree(runner)? })
                }
            }

            /// The canonical "any value" strategy instance for this type.
            pub const ANY: Any = Any { strategy: InnerStrategy::new(proptest::num::u64::ANY) };
        }
    }
}

impl_num! { u128 }
impl_num! { i128 }