From b6d0eec3de0ac75ce81784251b4648a1cef7f628 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Wed, 8 Dec 2021 16:44:21 -0800 Subject: [PATCH 1/6] Wrap bitshifts in ops.rs For all other operators, we use wrapping logic where applicable. This is another case it applies. Per rust-lang/rust#91237, we may wish to specify this as the natural behavior of `simd_{shl,shr}`. --- crates/core_simd/src/ops.rs | 168 +++++++++++++++++++++++------------- 1 file changed, 109 insertions(+), 59 deletions(-) diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs index 3582c57870b9..2ebcef3d8297 100644 --- a/crates/core_simd/src/ops.rs +++ b/crates/core_simd/src/ops.rs @@ -32,14 +32,115 @@ where } } -/// Checks if the right-hand side argument of a left- or right-shift would cause overflow. -fn invalid_shift_rhs(rhs: T) -> bool -where - T: Default + PartialOrd + core::convert::TryFrom, - >::Error: core::fmt::Debug, -{ - let bits_in_type = T::try_from(8 * core::mem::size_of::()).unwrap(); - rhs < T::default() || rhs >= bits_in_type +/// SAFETY: This macro should not be used for anything except Shl or Shr, and passed the appropriate shift intrinsic. +/// It handles performing a bitand in addition to calling the shift operator, so that the result +/// is well-defined: LLVM can return a poison value if you shl, lshr, or ashr if rhs >= ::BITS +/// At worst, this will maybe add another instruction and cycle, +/// at best, it may open up more optimization opportunities, +/// or simply be elided entirely, especially for SIMD ISAs which default to this. +/// +// FIXME: Consider implementing this in cg_llvm instead? +// cg_clif defaults to this, and scalar MIR shifts also default to wrapping +macro_rules! 
wrap_bitshift_inner { + (impl $op:ident for Simd<$int:ty, LANES> { + fn $call:ident(self, rhs: Self) -> Self::Output { + unsafe { $simd_call:ident } + } + }) => { + impl $op for Simd<$int, LANES> + where + $int: SimdElement, + LaneCount: SupportedLaneCount, + { + type Output = Self; + + #[inline] + #[must_use = "operator returns a new vector without mutating the inputs"] + fn $call(self, rhs: Self) -> Self::Output { + unsafe { + $crate::intrinsics::$simd_call(self, rhs.bitand(Simd::splat(<$int>::BITS as $int - 1))) + } + } + } + }; +} + +macro_rules! wrap_bitshifts { + ($(impl ShiftOps for Simd<$int:ty, LANES> { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + })*) => { + $( + wrap_bitshift_inner! { + impl Shl for Simd<$int, LANES> { + fn shl(self, rhs: Self) -> Self::Output { + unsafe { simd_shl } + } + } + } + wrap_bitshift_inner! { + impl Shr for Simd<$int, LANES> { + fn shr(self, rhs: Self) -> Self::Output { + // This automatically monomorphizes to lshr or ashr, depending, + // so it's fine to use it for both UInts and SInts. + unsafe { simd_shr } + } + } + } + )* + }; +} + +wrap_bitshifts! 
{ + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } + + impl ShiftOps for Simd { + fn shl(self, rhs: Self) -> Self::Output; + fn shr(self, rhs: Self) -> Self::Output; + } } /// Automatically implements operators over references in addition to the provided operator. @@ -85,12 +186,6 @@ macro_rules! impl_op { { impl Rem for $scalar:ty } => { impl_op! { @binary $scalar, Rem::rem, simd_rem } }; - { impl Shl for $scalar:ty } => { - impl_op! { @binary $scalar, Shl::shl, simd_shl } - }; - { impl Shr for $scalar:ty } => { - impl_op! { @binary $scalar, Shr::shr, simd_shr } - }; { impl BitAnd for $scalar:ty } => { impl_op! { @binary $scalar, BitAnd::bitand, simd_and } }; @@ -202,51 +297,6 @@ macro_rules! impl_unsigned_int_ops { } } } - - // shifts panic on overflow - impl_ref_ops! 
{ - impl core::ops::Shl for Simd<$scalar, LANES> - where - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - fn shl(self, rhs: Self) -> Self::Output { - // TODO there is probably a better way of doing this - if rhs.as_array() - .iter() - .copied() - .any(invalid_shift_rhs) - { - panic!("attempt to shift left with overflow"); - } - unsafe { intrinsics::simd_shl(self, rhs) } - } - } - } - - impl_ref_ops! { - impl core::ops::Shr for Simd<$scalar, LANES> - where - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - fn shr(self, rhs: Self) -> Self::Output { - // TODO there is probably a better way of doing this - if rhs.as_array() - .iter() - .copied() - .any(invalid_shift_rhs) - { - panic!("attempt to shift with overflow"); - } - unsafe { intrinsics::simd_shr(self, rhs) } - } - } - } )* }; } From 8aef340b8b0658e34b54fdea59e5ffc5ec581106 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Wed, 8 Dec 2021 17:23:54 -0800 Subject: [PATCH 2/6] Refactor bitops with `#[must_use]` --- crates/core_simd/src/ops.rs | 131 +++++++++++++++++++++++++++--------- 1 file changed, 98 insertions(+), 33 deletions(-) diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs index 2ebcef3d8297..5e775d6ca138 100644 --- a/crates/core_simd/src/ops.rs +++ b/crates/core_simd/src/ops.rs @@ -32,6 +32,29 @@ where } } +macro_rules! unsafe_base_op { + ($(impl $op:ident for Simd<$scalar:ty, LANES> { + fn $call:ident(self, rhs: Self) -> Self::Output { + unsafe{ $simd_call:ident } + } + })*) => { + $(impl $op for Simd<$scalar, LANES> + where + $scalar: SimdElement, + LaneCount: SupportedLaneCount, + { + type Output = Self; + + #[inline] + #[must_use = "operator returns a new vector without mutating the inputs"] + fn $call(self, rhs: Self) -> Self::Output { + unsafe { $crate::intrinsics::$simd_call(self, rhs) } + } + } + )* + } +} + /// SAFETY: This macro should not be used for anything except Shl or Shr, and passed the appropriate shift intrinsic. 
/// It handles performing a bitand in addition to calling the shift operator, so that the result /// is well-defined: LLVM can return a poison value if you shl, lshr, or ashr if rhs >= ::BITS @@ -41,13 +64,13 @@ where /// // FIXME: Consider implementing this in cg_llvm instead? // cg_clif defaults to this, and scalar MIR shifts also default to wrapping -macro_rules! wrap_bitshift_inner { - (impl $op:ident for Simd<$int:ty, LANES> { +macro_rules! wrap_bitshift { + ($(impl $op:ident for Simd<$int:ty, LANES> { fn $call:ident(self, rhs: Self) -> Self::Output { unsafe { $simd_call:ident } } - }) => { - impl $op for Simd<$int, LANES> + })*) => { + $(impl $op for Simd<$int, LANES> where $int: SimdElement, LaneCount: SupportedLaneCount, @@ -61,24 +84,45 @@ macro_rules! wrap_bitshift_inner { $crate::intrinsics::$simd_call(self, rhs.bitand(Simd::splat(<$int>::BITS as $int - 1))) } } - } + })* }; } -macro_rules! wrap_bitshifts { - ($(impl ShiftOps for Simd<$int:ty, LANES> { +macro_rules! bitops { + ($(impl BitOps for Simd<$int:ty, LANES> { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; })*) => { $( - wrap_bitshift_inner! { + unsafe_base_op!{ + impl BitAnd for Simd<$int, LANES> { + fn bitand(self, rhs: Self) -> Self::Output { + unsafe { simd_and } + } + } + + impl BitOr for Simd<$int, LANES> { + fn bitor(self, rhs: Self) -> Self::Output { + unsafe { simd_or } + } + } + + impl BitXor for Simd<$int, LANES> { + fn bitxor(self, rhs: Self) -> Self::Output { + unsafe { simd_xor } + } + } + } + wrap_bitshift! { impl Shl for Simd<$int, LANES> { fn shl(self, rhs: Self) -> Self::Output { unsafe { simd_shl } } } - } - wrap_bitshift_inner! { + impl Shr for Simd<$int, LANES> { fn shr(self, rhs: Self) -> Self::Output { // This automatically monomorphizes to lshr or ashr, depending, @@ -91,53 +135,86 @@ macro_rules! 
wrap_bitshifts { }; } -wrap_bitshifts! { - impl ShiftOps for Simd { +// Integers can always accept bitand, bitor, and bitxor. +// The only question is how to handle shifts >= ::BITS? +// Our current solution uses wrapping logic. +bitops! { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn 
bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } - impl ShiftOps for Simd { + impl BitOps for Simd { + fn bitand(self, rhs: Self) -> Self::Output; + fn bitor(self, rhs: Self) -> Self::Output; + fn bitxor(self, rhs: Self) -> Self::Output; fn shl(self, rhs: Self) -> Self::Output; fn shr(self, rhs: Self) -> Self::Output; } @@ -186,15 +263,6 @@ macro_rules! impl_op { { impl Rem for $scalar:ty } => { impl_op! { @binary $scalar, Rem::rem, simd_rem } }; - { impl BitAnd for $scalar:ty } => { - impl_op! { @binary $scalar, BitAnd::bitand, simd_and } - }; - { impl BitOr for $scalar:ty } => { - impl_op! { @binary $scalar, BitOr::bitor, simd_or } - }; - { impl BitXor for $scalar:ty } => { - impl_op! { @binary $scalar, BitXor::bitxor, simd_xor } - }; // generic binary op with assignment when output is `Self` { @binary $scalar:ty, $trait:ident :: $trait_fn:ident, $intrinsic:ident } => { @@ -236,9 +304,6 @@ macro_rules! impl_unsigned_int_ops { impl_op! { impl Add for $scalar } impl_op! { impl Sub for $scalar } impl_op! { impl Mul for $scalar } - impl_op! { impl BitAnd for $scalar } - impl_op! { impl BitOr for $scalar } - impl_op! { impl BitXor for $scalar } // Integers panic on divide by 0 impl_ref_ops! 
{ From 049e8ca7f7fc42501b98afcb9c32fd51080bd75a Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Wed, 8 Dec 2021 17:31:19 -0800 Subject: [PATCH 3/6] Refactor float arith with `#[must_use]` --- crates/core_simd/src/ops.rs | 78 ++++++++++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 14 deletions(-) diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs index 5e775d6ca138..65b461d39818 100644 --- a/crates/core_simd/src/ops.rs +++ b/crates/core_simd/src/ops.rs @@ -220,6 +220,70 @@ bitops! { } } +macro_rules! float_arith { + ($(impl FloatArith for Simd<$float:ty, LANES> { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + })*) => { + $( + unsafe_base_op!{ + impl Add for Simd<$float, LANES> { + fn add(self, rhs: Self) -> Self::Output { + unsafe { simd_add } + } + } + + impl Mul for Simd<$float, LANES> { + fn mul(self, rhs: Self) -> Self::Output { + unsafe { simd_mul } + } + } + + impl Sub for Simd<$float, LANES> { + fn sub(self, rhs: Self) -> Self::Output { + unsafe { simd_sub } + } + } + + impl Div for Simd<$float, LANES> { + fn div(self, rhs: Self) -> Self::Output { + unsafe { simd_div } + } + } + + impl Rem for Simd<$float, LANES> { + fn rem(self, rhs: Self) -> Self::Output { + unsafe { simd_rem } + } + } + } + )* + }; +} + +// We don't need any special precautions here: +// Floats always accept arithmetic ops, but may become NaN. +float_arith! 
{ + impl FloatArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl FloatArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } +} + /// Automatically implements operators over references in addition to the provided operator. macro_rules! impl_ref_ops { // binary op @@ -284,19 +348,6 @@ macro_rules! impl_op { }; } -/// Implements floating-point operators for the provided types. -macro_rules! impl_float_ops { - { $($scalar:ty),* } => { - $( - impl_op! { impl Add for $scalar } - impl_op! { impl Sub for $scalar } - impl_op! { impl Mul for $scalar } - impl_op! { impl Div for $scalar } - impl_op! { impl Rem for $scalar } - )* - }; -} - /// Implements unsigned integer operators for the provided types. macro_rules! impl_unsigned_int_ops { { $($scalar:ty),* } => { @@ -375,4 +426,3 @@ macro_rules! impl_signed_int_ops { impl_unsigned_int_ops! { u8, u16, u32, u64, usize } impl_signed_int_ops! { i8, i16, i32, i64, isize } -impl_float_ops! { f32, f64 } From 5dcd397f47a17aec3b049af2d7541530b859e47b Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Wed, 8 Dec 2021 18:42:06 -0800 Subject: [PATCH 4/6] Finish refactoring ints in ops.rs This should perform a SIMD check for whether or not we can div/rem, so that we can panic several times faster! 
--- crates/core_simd/src/ops.rs | 281 +++++++++++++++++++----------------- 1 file changed, 152 insertions(+), 129 deletions(-) diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs index 65b461d39818..e6d7e695391c 100644 --- a/crates/core_simd/src/ops.rs +++ b/crates/core_simd/src/ops.rs @@ -1,5 +1,4 @@ -use crate::simd::intrinsics; -use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount}; +use crate::simd::{LaneCount, Mask, Simd, SimdElement, SupportedLaneCount}; use core::ops::{Add, Mul}; use core::ops::{BitAnd, BitOr, BitXor}; use core::ops::{Div, Rem, Sub}; @@ -284,145 +283,169 @@ float_arith! { } } -/// Automatically implements operators over references in addition to the provided operator. -macro_rules! impl_ref_ops { - // binary op - { - impl core::ops::$trait:ident<$rhs:ty> for $type:ty - where - LaneCount<$lanes2:ident>: SupportedLaneCount, - { - type Output = $output:ty; - - $(#[$attrs:meta])* - fn $fn:ident($self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) -> Self::Output $body:tt +// Division by zero is poison, according to LLVM. +// So is dividing the MIN value of a signed integer by -1, +// since that would return MAX + 1. +// FIXME: Rust allows ::MIN / -1, +// so we should probably figure out how to make that safe. +macro_rules! 
int_divrem_guard { + ($(impl $op:ident for Simd<$sint:ty, LANES> { + const PANIC_ZERO: &'static str = $zero:literal; + const PANIC_OVERFLOW: &'static str = $overflow:literal; + fn $call:ident { + unsafe { $simd_call:ident } } - } => { - impl core::ops::$trait<$rhs> for $type + })*) => { + $(impl $op for Simd<$sint, LANES> where - LaneCount<$lanes2>: SupportedLaneCount, + $sint: SimdElement, + LaneCount: SupportedLaneCount, { - type Output = $output; - - $(#[$attrs])* - fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body - } + type Output = Self; + #[inline] + #[must_use = "operator returns a new vector without mutating the inputs"] + fn $call(self, rhs: Self) -> Self::Output { + if rhs.lanes_eq(Simd::splat(0)).any() { + panic!("attempt to calculate the remainder with a divisor of zero"); + } else if <$sint>::MIN != 0 && self.lanes_eq(Simd::splat(<$sint>::MIN)) & rhs.lanes_eq(Simd::splat(-1 as _)) + != Mask::splat(false) + { + panic!("attempt to calculate the remainder with overflow"); + } else { + unsafe { $crate::intrinsics::$simd_call(self, rhs) } + } + } + })* }; } -/// Automatically implements operators over vectors and scalars for a particular vector. -macro_rules! impl_op { - { impl Add for $scalar:ty } => { - impl_op! { @binary $scalar, Add::add, simd_add } - }; - { impl Sub for $scalar:ty } => { - impl_op! { @binary $scalar, Sub::sub, simd_sub } - }; - { impl Mul for $scalar:ty } => { - impl_op! { @binary $scalar, Mul::mul, simd_mul } - }; - { impl Div for $scalar:ty } => { - impl_op! { @binary $scalar, Div::div, simd_div } - }; - { impl Rem for $scalar:ty } => { - impl_op! { @binary $scalar, Rem::rem, simd_rem } - }; - - // generic binary op with assignment when output is `Self` - { @binary $scalar:ty, $trait:ident :: $trait_fn:ident, $intrinsic:ident } => { - impl_ref_ops! 
{ - impl core::ops::$trait for Simd<$scalar, LANES> - where - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - fn $trait_fn(self, rhs: Self) -> Self::Output { - unsafe { - intrinsics::$intrinsic(self, rhs) - } - } - } - } - }; -} - -/// Implements unsigned integer operators for the provided types. -macro_rules! impl_unsigned_int_ops { - { $($scalar:ty),* } => { +macro_rules! int_arith { + ($(impl IntArith for Simd<$sint:ty, LANES> { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + })*) => { $( - impl_op! { impl Add for $scalar } - impl_op! { impl Sub for $scalar } - impl_op! { impl Mul for $scalar } - - // Integers panic on divide by 0 - impl_ref_ops! { - impl core::ops::Div for Simd<$scalar, LANES> - where - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - fn div(self, rhs: Self) -> Self::Output { - if rhs.as_array() - .iter() - .any(|x| *x == 0) - { - panic!("attempt to divide by zero"); - } - - // Guards for div(MIN, -1), - // this check only applies to signed ints - if <$scalar>::MIN != 0 && self.as_array().iter() - .zip(rhs.as_array().iter()) - .any(|(x,y)| *x == <$scalar>::MIN && *y == -1 as _) { - panic!("attempt to divide with overflow"); - } - unsafe { intrinsics::simd_div(self, rhs) } - } + unsafe_base_op!{ + impl Add for Simd<$sint, LANES> { + fn add(self, rhs: Self) -> Self::Output { + unsafe { simd_add } } } - // remainder panics on zero divisor - impl_ref_ops! 
{ - impl core::ops::Rem for Simd<$scalar, LANES> - where - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - fn rem(self, rhs: Self) -> Self::Output { - if rhs.as_array() - .iter() - .any(|x| *x == 0) - { - panic!("attempt to calculate the remainder with a divisor of zero"); - } - - // Guards for rem(MIN, -1) - // this branch applies the check only to signed ints - if <$scalar>::MIN != 0 && self.as_array().iter() - .zip(rhs.as_array().iter()) - .any(|(x,y)| *x == <$scalar>::MIN && *y == -1 as _) { - panic!("attempt to calculate the remainder with overflow"); - } - unsafe { intrinsics::simd_rem(self, rhs) } - } + impl Mul for Simd<$sint, LANES> { + fn mul(self, rhs: Self) -> Self::Output { + unsafe { simd_mul } } } - )* - }; + + impl Sub for Simd<$sint, LANES> { + fn sub(self, rhs: Self) -> Self::Output { + unsafe { simd_sub } + } + } + } + + int_divrem_guard!{ + impl Div for Simd<$sint, LANES> { + const PANIC_ZERO: &'static str = "attempt to divide by zero"; + const PANIC_OVERFLOW: &'static str = "attempt to divide with overflow"; + fn div { + unsafe { simd_div } + } + } + + impl Rem for Simd<$sint, LANES> { + const PANIC_ZERO: &'static str = "attempt to calculate the remainder with a divisor of zero"; + const PANIC_OVERFLOW: &'static str = "attempt to calculate the remainder with overflow"; + fn rem { + unsafe { simd_rem } + } + } + })* + } } -/// Implements unsigned integer operators for the provided types. -macro_rules! impl_signed_int_ops { - { $($scalar:ty),* } => { - impl_unsigned_int_ops! { $($scalar),* } - }; -} +int_arith! { + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } -impl_unsigned_int_ops! { u8, u16, u32, u64, usize } -impl_signed_int_ops! 
{ i8, i16, i32, i64, isize } + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> 
Self::Output; + } + + impl IntArith for Simd { + fn add(self, rhs: Self) -> Self::Output; + fn mul(self, rhs: Self) -> Self::Output; + fn sub(self, rhs: Self) -> Self::Output; + fn div(self, rhs: Self) -> Self::Output; + fn rem(self, rhs: Self) -> Self::Output; + } +} From bc326a2bbccdccb321328e7a1cde3ad3734a5953 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 21 Dec 2021 18:28:57 -0800 Subject: [PATCH 5/6] Refactor ops.rs with a recursive macro This approaches reducing macro nesting in a slightly different way. Instead of just flattening details, make one macro apply another. This allows specifying all details up-front in the first macro invocation, making it easier to audit and refactor in the future. --- crates/core_simd/src/ops.rs | 518 +++++++++++------------------------- 1 file changed, 152 insertions(+), 366 deletions(-) diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs index e6d7e695391c..6cfc8f80b53c 100644 --- a/crates/core_simd/src/ops.rs +++ b/crates/core_simd/src/ops.rs @@ -31,27 +31,10 @@ where } } -macro_rules! unsafe_base_op { - ($(impl $op:ident for Simd<$scalar:ty, LANES> { - fn $call:ident(self, rhs: Self) -> Self::Output { - unsafe{ $simd_call:ident } - } - })*) => { - $(impl $op for Simd<$scalar, LANES> - where - $scalar: SimdElement, - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - #[must_use = "operator returns a new vector without mutating the inputs"] - fn $call(self, rhs: Self) -> Self::Output { - unsafe { $crate::intrinsics::$simd_call(self, rhs) } - } - } - )* - } +macro_rules! unsafe_base { + ($lhs:ident, $rhs:ident, {$simd_call:ident}, $($_:tt)*) => { + unsafe { $crate::intrinsics::$simd_call($lhs, $rhs) } + }; } /// SAFETY: This macro should not be used for anything except Shl or Shr, and passed the appropriate shift intrinsic. @@ -64,388 +47,191 @@ macro_rules! unsafe_base_op { // FIXME: Consider implementing this in cg_llvm instead? 
// cg_clif defaults to this, and scalar MIR shifts also default to wrapping macro_rules! wrap_bitshift { - ($(impl $op:ident for Simd<$int:ty, LANES> { - fn $call:ident(self, rhs: Self) -> Self::Output { - unsafe { $simd_call:ident } + ($lhs:ident, $rhs:ident, {$simd_call:ident}, $int:ident) => { + unsafe { + $crate::intrinsics::$simd_call($lhs, $rhs.bitand(Simd::splat(<$int>::BITS as $int - 1))) } - })*) => { - $(impl $op for Simd<$int, LANES> - where - $int: SimdElement, - LaneCount: SupportedLaneCount, - { - type Output = Self; - - #[inline] - #[must_use = "operator returns a new vector without mutating the inputs"] - fn $call(self, rhs: Self) -> Self::Output { - unsafe { - $crate::intrinsics::$simd_call(self, rhs.bitand(Simd::splat(<$int>::BITS as $int - 1))) - } - } - })* }; } -macro_rules! bitops { - ($(impl BitOps for Simd<$int:ty, LANES> { - fn bitand(self, rhs: Self) -> Self::Output; - fn bitor(self, rhs: Self) -> Self::Output; - fn bitxor(self, rhs: Self) -> Self::Output; - fn shl(self, rhs: Self) -> Self::Output; - fn shr(self, rhs: Self) -> Self::Output; - })*) => { - $( - unsafe_base_op!{ - impl BitAnd for Simd<$int, LANES> { - fn bitand(self, rhs: Self) -> Self::Output { - unsafe { simd_and } - } - } - - impl BitOr for Simd<$int, LANES> { - fn bitor(self, rhs: Self) -> Self::Output { - unsafe { simd_or } - } - } - - impl BitXor for Simd<$int, LANES> { - fn bitxor(self, rhs: Self) -> Self::Output { - unsafe { simd_xor } - } - } - } - wrap_bitshift! { - impl Shl for Simd<$int, LANES> { - fn shl(self, rhs: Self) -> Self::Output { - unsafe { simd_shl } - } - } - - impl Shr for Simd<$int, LANES> { - fn shr(self, rhs: Self) -> Self::Output { - // This automatically monomorphizes to lshr or ashr, depending, - // so it's fine to use it for both UInts and SInts. - unsafe { simd_shr } - } - } - } - )* - }; -} - -// Integers can always accept bitand, bitor, and bitxor. -// The only question is how to handle shifts >= ::BITS? 
-// Our current solution uses wrapping logic.
-bitops! {
-    impl BitOps for Simd<i8, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<i16, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<i32, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<i64, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<isize, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<u8, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<u16, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<u32, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<u64, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-
-    impl BitOps for Simd<usize, LANES> {
-        fn bitand(self, rhs: Self) -> Self::Output;
-        fn bitor(self, rhs: Self) -> Self::Output;
-        fn bitxor(self, rhs: Self) -> Self::Output;
-        fn shl(self, rhs: Self) -> Self::Output;
-        fn shr(self, rhs: Self) -> Self::Output;
-    }
-}
-
-macro_rules! float_arith {
-    ($(impl FloatArith for Simd<$float:ty, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
-    })*) => {
-        $(
-            unsafe_base_op!{
-                impl Add for Simd<$float, LANES> {
-                    fn add(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_add }
-                    }
-                }
-
-                impl Mul for Simd<$float, LANES> {
-                    fn mul(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_mul }
-                    }
-                }
-
-                impl Sub for Simd<$float, LANES> {
-                    fn sub(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_sub }
-                    }
-                }
-
-                impl Div for Simd<$float, LANES> {
-                    fn div(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_div }
-                    }
-                }
-
-                impl Rem for Simd<$float, LANES> {
-                    fn rem(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_rem }
-                    }
-                }
-            }
-        )*
-    };
-}
-
-// We don't need any special precautions here:
-// Floats always accept arithmetic ops, but may become NaN.
-float_arith! {
-    impl FloatArith for Simd<f32, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
-    }
-
-    impl FloatArith for Simd<f64, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
-    }
-}
-
 // Division by zero is poison, according to LLVM.
 // So is dividing the MIN value of a signed integer by -1,
 // since that would return MAX + 1.
 // FIXME: Rust allows <int>::MIN / -1,
 // so we should probably figure out how to make that safe.
 macro_rules! int_divrem_guard {
-    ($(impl $op:ident for Simd<$sint:ty, LANES> {
-        const PANIC_ZERO: &'static str = $zero:literal;
-        const PANIC_OVERFLOW: &'static str = $overflow:literal;
-        fn $call:ident {
-            unsafe { $simd_call:ident }
-        }
-    })*) => {
-        $(impl $op for Simd<$sint, LANES>
-        where
-            $sint: SimdElement,
-            LaneCount<LANES>: SupportedLaneCount,
+    ( $lhs:ident,
+      $rhs:ident,
+      { const PANIC_ZERO: &'static str = $zero:literal;
+        const PANIC_OVERFLOW: &'static str = $overflow:literal;
+        $simd_call:ident
+      },
+      $int:ident ) => {
+        if $rhs.lanes_eq(Simd::splat(0)).any() {
+            panic!($zero);
+        } else if <$int>::MIN != 0
+            && $lhs.lanes_eq(Simd::splat(<$int>::MIN)) & $rhs.lanes_eq(Simd::splat(-1 as _))
+                != Mask::splat(false)
         {
-            type Output = Self;
-            #[inline]
-            #[must_use = "operator returns a new vector without mutating the inputs"]
-            fn $call(self, rhs: Self) -> Self::Output {
-                if rhs.lanes_eq(Simd::splat(0)).any() {
-                    panic!("attempt to calculate the remainder with a divisor of zero");
-                } else if <$sint>::MIN != 0 && self.lanes_eq(Simd::splat(<$sint>::MIN)) & rhs.lanes_eq(Simd::splat(-1 as _))
-                    != Mask::splat(false)
-                {
-                    panic!("attempt to calculate the remainder with overflow");
-                } else {
-                    unsafe { $crate::intrinsics::$simd_call(self, rhs) }
-                }
-            }
-        })*
+            panic!($overflow);
+        } else {
+            unsafe { $crate::intrinsics::$simd_call($lhs, $rhs) }
+        }
     };
 }
 
-macro_rules! int_arith {
-    ($(impl IntArith for Simd<$sint:ty, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
-    })*) => {
-        $(
-            unsafe_base_op!{
-                impl Add for Simd<$sint, LANES> {
-                    fn add(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_add }
-                    }
-                }
+macro_rules! for_base_types {
+    ( T = ($($scalar:ident),*);
+      type Lhs = Simd<T, N>;
+      type Rhs = Simd<T, N>;
+      type Output = $out:ty;
 
-                impl Mul for Simd<$sint, LANES> {
-                    fn mul(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_mul }
-                    }
-                }
+      impl $op:ident::$call:ident {
+          $macro_impl:ident $inner:tt
+      }) => {
+        $(
+            impl<const N: usize> $op for Simd<$scalar, N>
+            where
+                $scalar: SimdElement,
+                LaneCount<N>: SupportedLaneCount,
+            {
+                type Output = $out;
 
-                impl Sub for Simd<$sint, LANES> {
-                    fn sub(self, rhs: Self) -> Self::Output {
-                        unsafe { simd_sub }
-                    }
-                }
+                #[inline]
+                #[must_use = "operator returns a new vector without mutating the inputs"]
+                fn $call(self, rhs: Self) -> Self::Output {
+                    $macro_impl!(self, rhs, $inner, $scalar)
+                }
+            })*
+    }
+}
+
+// A "TokenTree muncher": takes a set of scalar types `T = {};`
+// type parameters for the ops it implements, `Op::fn` names,
+// and a macro that expands into an expr, substituting in an intrinsic.
+// It passes that to for_base_types, which expands an impl for the types,
+// using the expanded expr in the function, and recurses with itself.
+//
+// tl;dr impls a set of ops::{Traits} for a set of types
+macro_rules! for_base_ops {
+    (
+        T = $types:tt;
+        type Lhs = Simd<T, N>;
+        type Rhs = Simd<T, N>;
+        type Output = $out:ident;
+        impl $op:ident::$call:ident
+            $inner:tt
+        $($rest:tt)*
+    ) => {
+        for_base_types! {
+            T = $types;
+            type Lhs = Simd<T, N>;
+            type Rhs = Simd<T, N>;
+            type Output = $out;
+            impl $op::$call
+                $inner
+        }
-
-        int_divrem_guard!{
-            impl Div for Simd<$sint, LANES> {
-                const PANIC_ZERO: &'static str = "attempt to divide by zero";
-                const PANIC_OVERFLOW: &'static str = "attempt to divide with overflow";
-                fn div {
-                    unsafe { simd_div }
-                }
-            }
-
-            impl Rem for Simd<$sint, LANES> {
-                const PANIC_ZERO: &'static str = "attempt to calculate the remainder with a divisor of zero";
-                const PANIC_OVERFLOW: &'static str = "attempt to calculate the remainder with overflow";
-                fn rem {
-                    unsafe { simd_rem }
-                }
-            }
-        })*
+        for_base_ops! {
+            T = $types;
+            type Lhs = Simd<T, N>;
+            type Rhs = Simd<T, N>;
+            type Output = $out;
+            $($rest)*
+        }
+    };
+    ($($done:tt)*) => {
+        // Done.
     }
 }
 
-int_arith! {
-    impl IntArith for Simd<i8, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+// Integers can always accept add, mul, sub, bitand, bitor, and bitxor.
+// For all of these operations, simd_* intrinsics apply wrapping logic.
+for_base_ops! {
+    T = (i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+    type Lhs = Simd<T, N>;
+    type Rhs = Simd<T, N>;
+    type Output = Self;
+
+    impl Add::add {
+        unsafe_base { simd_add }
     }
 
-    impl IntArith for Simd<i16, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl Mul::mul {
+        unsafe_base { simd_mul }
     }
 
-    impl IntArith for Simd<i32, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl Sub::sub {
+        unsafe_base { simd_sub }
     }
 
-    impl IntArith for Simd<i64, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl BitAnd::bitand {
+        unsafe_base { simd_and }
     }
 
-    impl IntArith for Simd<isize, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl BitOr::bitor {
+        unsafe_base { simd_or }
     }
 
-    impl IntArith for Simd<u8, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl BitXor::bitxor {
+        unsafe_base { simd_xor }
     }
 
-    impl IntArith for Simd<u16, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl Div::div {
+        int_divrem_guard {
+            const PANIC_ZERO: &'static str = "attempt to divide by zero";
+            const PANIC_OVERFLOW: &'static str = "attempt to divide with overflow";
+            simd_div
+        }
     }
 
-    impl IntArith for Simd<u32, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl Rem::rem {
+        int_divrem_guard {
+            const PANIC_ZERO: &'static str = "attempt to calculate the remainder with a divisor of zero";
+            const PANIC_OVERFLOW: &'static str = "attempt to calculate the remainder with overflow";
+            simd_rem
+        }
     }
 
-    impl IntArith for Simd<u64, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    // The only question is how to handle shifts >= <T>::BITS?
+    // Our current solution uses wrapping logic.
+    impl Shl::shl {
+        wrap_bitshift { simd_shl }
     }
 
-    impl IntArith for Simd<usize, LANES> {
-        fn add(self, rhs: Self) -> Self::Output;
-        fn mul(self, rhs: Self) -> Self::Output;
-        fn sub(self, rhs: Self) -> Self::Output;
-        fn div(self, rhs: Self) -> Self::Output;
-        fn rem(self, rhs: Self) -> Self::Output;
+    impl Shr::shr {
+        wrap_bitshift {
+            // This automatically monomorphizes to lshr or ashr, depending,
+            // so it's fine to use it for both UInts and SInts.
+            simd_shr
+        }
+    }
+}
+
+// We don't need any special precautions here:
+// Floats always accept arithmetic ops, but may become NaN.
+for_base_ops! {
+    T = (f32, f64);
+    type Lhs = Simd<T, N>;
+    type Rhs = Simd<T, N>;
+    type Output = Self;
+
+    impl Add::add {
+        unsafe_base { simd_add }
+    }
+
+    impl Mul::mul {
+        unsafe_base { simd_mul }
+    }
+
+    impl Sub::sub {
+        unsafe_base { simd_sub }
+    }
+
+    impl Div::div {
+        unsafe_base { simd_div }
+    }
+
+    impl Rem::rem {
+        unsafe_base { simd_rem }
+    }
 }

From a42420583bdb6ea788c2f7ec0a0360d99934f2a7 Mon Sep 17 00:00:00 2001
From: Jubilee Young <workingjubilee@gmail.com>
Date: Thu, 23 Dec 2021 23:14:13 -0800
Subject: [PATCH 6/6] Use Mask::any in div check

---
 crates/core_simd/src/ops.rs | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs
index 6cfc8f80b53c..82b007aa6966 100644
--- a/crates/core_simd/src/ops.rs
+++ b/crates/core_simd/src/ops.rs
@@ -1,4 +1,4 @@
-use crate::simd::{LaneCount, Mask, Simd, SimdElement, SupportedLaneCount};
+use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
 use core::ops::{Add, Mul};
 use core::ops::{BitAnd, BitOr, BitXor};
 use core::ops::{Div, Rem, Sub};
@@ -70,8 +70,7 @@ macro_rules! int_divrem_guard {
         if $rhs.lanes_eq(Simd::splat(0)).any() {
             panic!($zero);
         } else if <$int>::MIN != 0
-            && $lhs.lanes_eq(Simd::splat(<$int>::MIN)) & $rhs.lanes_eq(Simd::splat(-1 as _))
-                != Mask::splat(false)
+            && ($lhs.lanes_eq(Simd::splat(<$int>::MIN)) & $rhs.lanes_eq(Simd::splat(-1 as _))).any()
         {
             panic!($overflow);
         } else {