Limit all types to 64 lanes

This commit is contained in:
Caleb Zulawski 2021-02-09 22:13:27 -05:00
parent 8aa7ba7d4c
commit 6362540f11
9 changed files with 468 additions and 135 deletions

View file

@ -33,7 +33,10 @@ macro_rules! impl_fmt_trait {
{ $($type:ident => $(($trait:ident, $format:ident)),*;)* } => {
$( // repeat type
$( // repeat trait
impl<const LANES: usize> core::fmt::$trait for crate::$type<LANES> {
impl<const LANES: usize> core::fmt::$trait for crate::$type<LANES>
where
Self: crate::LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
$format(self.as_ref(), f)
}

View file

@ -0,0 +1,35 @@
/// Marker trait for SIMD vector and mask types whose lane count is one of the
/// supported sizes: a power of two from 1 up to 64 lanes.
///
/// Used as a `where` bound throughout the crate to restrict the `LANES` const
/// parameter to values the implementation can handle.
pub trait LanesAtMost64 {}
// Implements `LanesAtMost64` for every supported lane count of one generic type.
macro_rules! impl_for {
{ $name:ident } => {
impl LanesAtMost64 for $name<1> {}
impl LanesAtMost64 for $name<2> {}
impl LanesAtMost64 for $name<4> {}
impl LanesAtMost64 for $name<8> {}
impl LanesAtMost64 for $name<16> {}
impl LanesAtMost64 for $name<32> {}
impl LanesAtMost64 for $name<64> {}
}
}
use crate::*;
// Unsigned integer vectors.
impl_for! { SimdU8 }
impl_for! { SimdU16 }
impl_for! { SimdU32 }
impl_for! { SimdU64 }
impl_for! { SimdU128 }
impl_for! { SimdUsize }
// Signed integer vectors.
impl_for! { SimdI8 }
impl_for! { SimdI16 }
impl_for! { SimdI32 }
impl_for! { SimdI64 }
impl_for! { SimdI128 }
impl_for! { SimdIsize }
// Floating-point vectors.
impl_for! { SimdF32 }
impl_for! { SimdF64 }
// The bitmask type is gated by the same trait as the vectors.
impl_for! { BitMask }

View file

@ -14,6 +14,9 @@ mod intrinsics;
mod ops;
mod round;
mod lanes_at_most_64;
pub use lanes_at_most_64::*;
mod masks;
pub use masks::*;

View file

@ -29,7 +29,7 @@ macro_rules! from_transmute_x86 {
/// Implements common traits on the specified vector `$name`, holding multiple `$lanes` of `$type`.
macro_rules! impl_vector {
{ $name:ident, $type:ty } => {
impl<const LANES: usize> $name<LANES> {
impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost64 {
/// Construct a SIMD vector by setting all lanes to the given value.
pub const fn splat(value: $type) -> Self {
Self([value; LANES])
@ -72,23 +72,23 @@ macro_rules! impl_vector {
}
}
impl<const LANES: usize> Copy for $name<LANES> {}
impl<const LANES: usize> Copy for $name<LANES> where Self: crate::LanesAtMost64 {}
impl<const LANES: usize> Clone for $name<LANES> {
impl<const LANES: usize> Clone for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const LANES: usize> Default for $name<LANES> {
impl<const LANES: usize> Default for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn default() -> Self {
Self::splat(<$type>::default())
}
}
impl<const LANES: usize> PartialEq for $name<LANES> {
impl<const LANES: usize> PartialEq for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn eq(&self, other: &Self) -> bool {
// TODO use SIMD equality
@ -96,7 +96,7 @@ macro_rules! impl_vector {
}
}
impl<const LANES: usize> PartialOrd for $name<LANES> {
impl<const LANES: usize> PartialOrd for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
// TODO use SIMD equality
@ -105,14 +105,14 @@ macro_rules! impl_vector {
}
// array references
impl<const LANES: usize> AsRef<[$type; LANES]> for $name<LANES> {
impl<const LANES: usize> AsRef<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn as_ref(&self) -> &[$type; LANES] {
&self.0
}
}
impl<const LANES: usize> AsMut<[$type; LANES]> for $name<LANES> {
impl<const LANES: usize> AsMut<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn as_mut(&mut self) -> &mut [$type; LANES] {
&mut self.0
@ -120,14 +120,14 @@ macro_rules! impl_vector {
}
// slice references
impl<const LANES: usize> AsRef<[$type]> for $name<LANES> {
impl<const LANES: usize> AsRef<[$type]> for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn as_ref(&self) -> &[$type] {
&self.0
}
}
impl<const LANES: usize> AsMut<[$type]> for $name<LANES> {
impl<const LANES: usize> AsMut<[$type]> for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn as_mut(&mut self) -> &mut [$type] {
&mut self.0
@ -135,14 +135,14 @@ macro_rules! impl_vector {
}
// vector/array conversion
impl<const LANES: usize> From<[$type; LANES]> for $name<LANES> {
impl<const LANES: usize> From<[$type; LANES]> for $name<LANES> where Self: crate::LanesAtMost64 {
fn from(array: [$type; LANES]) -> Self {
Self(array)
}
}
// splat
impl<const LANES: usize> From<$type> for $name<LANES> {
impl<const LANES: usize> From<$type> for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn from(value: $type) -> Self {
Self::splat(value)
@ -158,9 +158,9 @@ macro_rules! impl_integer_vector {
{ $name:ident, $type:ty } => {
impl_vector! { $name, $type }
impl<const LANES: usize> Eq for $name<LANES> {}
impl<const LANES: usize> Eq for $name<LANES> where Self: crate::LanesAtMost64 {}
impl<const LANES: usize> Ord for $name<LANES> {
impl<const LANES: usize> Ord for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
// TODO use SIMD cmp
@ -168,7 +168,7 @@ macro_rules! impl_integer_vector {
}
}
impl<const LANES: usize> core::hash::Hash for $name<LANES> {
impl<const LANES: usize> core::hash::Hash for $name<LANES> where Self: crate::LanesAtMost64 {
#[inline]
fn hash<H>(&self, state: &mut H)
where
@ -187,7 +187,11 @@ macro_rules! impl_float_vector {
{ $name:ident, $type:ty, $bits_ty:ident } => {
impl_vector! { $name, $type }
impl<const LANES: usize> $name<LANES> {
impl<const LANES: usize> $name<LANES>
where
Self: crate::LanesAtMost64,
crate::$bits_ty<LANES>: crate::LanesAtMost64,
{
/// Raw transmutation to an unsigned integer vector type with the
/// same size and number of lanes.
#[inline]

View file

@ -1,12 +1,4 @@
/// Implemented for bitmask sizes that are supported by the implementation.
pub trait LanesAtMost64 {}
impl LanesAtMost64 for BitMask<1> {}
impl LanesAtMost64 for BitMask<2> {}
impl LanesAtMost64 for BitMask<4> {}
impl LanesAtMost64 for BitMask<8> {}
impl LanesAtMost64 for BitMask<16> {}
impl LanesAtMost64 for BitMask<32> {}
impl LanesAtMost64 for BitMask<64> {}
use crate::LanesAtMost64;
/// A mask where each lane is represented by a single bit.
#[derive(Copy, Clone, Debug)]

View file

@ -16,11 +16,31 @@ impl core::fmt::Display for TryFromMaskError {
macro_rules! define_mask {
{ $(#[$attr:meta])* struct $name:ident<const $lanes:ident: usize>($type:ty); } => {
$(#[$attr])*
#[derive(Copy, Clone, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[derive(Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[repr(transparent)]
pub struct $name<const $lanes: usize>($type);
pub struct $name<const $lanes: usize>($type)
where
$type: crate::LanesAtMost64;
impl<const $lanes: usize> $name<$lanes> {
impl<const LANES: usize> Copy for $name<LANES>
where
$type: crate::LanesAtMost64,
{}
impl<const LANES: usize> Clone for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const $lanes: usize> $name<$lanes>
where
$type: crate::LanesAtMost64,
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
Self(<$type>::splat(
@ -57,13 +77,19 @@ macro_rules! define_mask {
}
}
impl<const $lanes: usize> core::convert::From<bool> for $name<$lanes> {
impl<const $lanes: usize> core::convert::From<bool> for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
fn from(value: bool) -> Self {
Self::splat(value)
}
}
impl<const $lanes: usize> core::convert::TryFrom<$type> for $name<$lanes> {
impl<const $lanes: usize> core::convert::TryFrom<$type> for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
type Error = TryFromMaskError;
fn try_from(value: $type) -> Result<Self, Self::Error> {
if value.as_slice().iter().all(|x| *x == 0 || *x == -1) {
@ -74,7 +100,10 @@ macro_rules! define_mask {
}
}
impl<const $lanes: usize> core::convert::From<$name<$lanes>> for $type {
impl<const $lanes: usize> core::convert::From<$name<$lanes>> for $type
where
$type: crate::LanesAtMost64,
{
fn from(value: $name<$lanes>) -> Self {
value.0
}
@ -82,6 +111,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::convert::From<crate::BitMask<$lanes>> for $name<$lanes>
where
$type: crate::LanesAtMost64,
crate::BitMask<$lanes>: crate::LanesAtMost64,
{
fn from(value: crate::BitMask<$lanes>) -> Self {
@ -96,6 +126,7 @@ macro_rules! define_mask {
impl<const $lanes: usize> core::convert::From<$name<$lanes>> for crate::BitMask<$lanes>
where
$type: crate::LanesAtMost64,
crate::BitMask<$lanes>: crate::LanesAtMost64,
{
fn from(value: $name<$lanes>) -> Self {
@ -108,7 +139,10 @@ macro_rules! define_mask {
}
}
impl<const $lanes: usize> core::fmt::Debug for $name<$lanes> {
impl<const $lanes: usize> core::fmt::Debug for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_list()
.entries((0..LANES).map(|lane| self.test(lane)))
@ -116,31 +150,46 @@ macro_rules! define_mask {
}
}
impl<const $lanes: usize> core::fmt::Binary for $name<$lanes> {
impl<const $lanes: usize> core::fmt::Binary for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::Binary::fmt(&self.0, f)
}
}
impl<const $lanes: usize> core::fmt::Octal for $name<$lanes> {
impl<const $lanes: usize> core::fmt::Octal for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::Octal::fmt(&self.0, f)
}
}
impl<const $lanes: usize> core::fmt::LowerHex for $name<$lanes> {
impl<const $lanes: usize> core::fmt::LowerHex for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::LowerHex::fmt(&self.0, f)
}
}
impl<const $lanes: usize> core::fmt::UpperHex for $name<$lanes> {
impl<const $lanes: usize> core::fmt::UpperHex for $name<$lanes>
where
$type: crate::LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::UpperHex::fmt(&self.0, f)
}
}
impl<const LANES: usize> core::ops::BitAnd for $name<LANES> {
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
@ -148,7 +197,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES> {
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: bool) -> Self {
@ -156,7 +208,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool {
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool
where
$type: crate::LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn bitand(self, rhs: $name<LANES>) -> $name<LANES> {
@ -164,7 +219,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitOr for $name<LANES> {
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
@ -172,7 +230,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES> {
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: bool) -> Self {
@ -180,7 +241,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool {
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool
where
$type: crate::LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn bitor(self, rhs: $name<LANES>) -> $name<LANES> {
@ -188,7 +252,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitXor for $name<LANES> {
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
@ -196,7 +263,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES> {
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: bool) -> Self::Output {
@ -204,7 +274,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool {
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool
where
$type: crate::LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn bitxor(self, rhs: $name<LANES>) -> Self::Output {
@ -212,7 +285,10 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::Not for $name<LANES> {
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
$type: crate::LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn not(self) -> Self::Output {
@ -220,42 +296,60 @@ macro_rules! define_mask {
}
}
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES> {
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
self.0 &= rhs.0;
}
}
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES> {
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
*self &= Self::splat(rhs);
}
}
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES> {
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0;
}
}
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES> {
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
*self |= Self::splat(rhs);
}
}
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES> {
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
self.0 ^= rhs.0;
}
}
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES> {
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES>
where
$type: crate::LanesAtMost64,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
*self ^= Self::splat(rhs);
@ -291,11 +385,11 @@ define_mask! {
define_mask! {
/// A mask equivalent to [SimdI128](crate::SimdI128), where all bits in the lane must be either set
/// or unset.
struct SimdMask128<const LANES: usize>(crate::SimdI64<LANES>);
struct SimdMask128<const LANES: usize>(crate::SimdI128<LANES>);
}
define_mask! {
/// A mask equivalent to [SimdIsize](crate::SimdIsize), where all bits in the lane must be either set
/// or unset.
struct SimdMaskSize<const LANES: usize>(crate::SimdI64<LANES>);
struct SimdMaskSize<const LANES: usize>(crate::SimdIsize<LANES>);
}

View file

@ -7,16 +7,22 @@ pub use full_masks::*;
mod bitmask;
pub use bitmask::*;
use crate::LanesAtMost64;
macro_rules! define_opaque_mask {
{
$(#[$attr:meta])*
struct $name:ident<const $lanes:ident: usize>($inner_ty:ty);
@bits $bits_ty:ty
} => {
$(#[$attr])*
#[allow(non_camel_case_types)]
pub struct $name<const $lanes: usize>($inner_ty) where BitMask<LANES>: LanesAtMost64;
pub struct $name<const $lanes: usize>($inner_ty) where $bits_ty: LanesAtMost64;
impl<const $lanes: usize> $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
impl<const $lanes: usize> $name<$lanes>
where
$bits_ty: LanesAtMost64
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
Self(<$inner_ty>::splat(value))
@ -43,6 +49,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<BitMask<$lanes>> for $name<$lanes>
where
$bits_ty: LanesAtMost64,
BitMask<$lanes>: LanesAtMost64,
{
fn from(value: BitMask<$lanes>) -> Self {
@ -52,6 +59,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<$name<$lanes>> for crate::BitMask<$lanes>
where
$bits_ty: LanesAtMost64,
BitMask<$lanes>: LanesAtMost64,
{
fn from(value: $name<$lanes>) -> Self {
@ -61,7 +69,7 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<$inner_ty> for $name<$lanes>
where
BitMask<$lanes>: LanesAtMost64,
$bits_ty: LanesAtMost64,
{
fn from(value: $inner_ty) -> Self {
Self(value)
@ -70,50 +78,72 @@ macro_rules! define_opaque_mask {
impl<const $lanes: usize> From<$name<$lanes>> for $inner_ty
where
BitMask<$lanes>: LanesAtMost64,
$bits_ty: LanesAtMost64,
{
fn from(value: $name<$lanes>) -> Self {
value.0
}
}
impl<const $lanes: usize> Copy for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {}
impl<const $lanes: usize> Copy for $name<$lanes>
where
$inner_ty: Copy,
$bits_ty: LanesAtMost64,
{}
impl<const $lanes: usize> Clone for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
impl<const $lanes: usize> Clone for $name<$lanes>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const $lanes: usize> Default for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
impl<const $lanes: usize> Default for $name<$lanes>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn default() -> Self {
Self::splat(false)
}
}
impl<const $lanes: usize> PartialEq for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
impl<const $lanes: usize> PartialEq for $name<$lanes>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<const $lanes: usize> PartialOrd for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
impl<const $lanes: usize> PartialOrd for $name<$lanes>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<const $lanes: usize> core::fmt::Debug for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
impl<const $lanes: usize> core::fmt::Debug for $name<$lanes>
where
$bits_ty: LanesAtMost64,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::Debug::fmt(&self.0, f)
}
}
impl<const LANES: usize> core::ops::BitAnd for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
@ -121,7 +151,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: bool) -> Self {
@ -129,7 +162,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool
where
$bits_ty: LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn bitand(self, rhs: $name<LANES>) -> $name<LANES> {
@ -137,7 +173,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitOr for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
@ -145,7 +184,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: bool) -> Self {
@ -153,7 +195,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool
where
$bits_ty: LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn bitor(self, rhs: $name<LANES>) -> $name<LANES> {
@ -161,7 +206,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitXor for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
@ -169,7 +217,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: bool) -> Self::Output {
@ -177,7 +228,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool
where
$bits_ty: LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn bitxor(self, rhs: $name<LANES>) -> Self::Output {
@ -185,7 +239,10 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::Not for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
type Output = $name<LANES>;
#[inline]
fn not(self) -> Self::Output {
@ -193,42 +250,60 @@ macro_rules! define_opaque_mask {
}
}
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
self.0 &= rhs.0;
}
}
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
*self &= Self::splat(rhs);
}
}
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0;
}
}
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
*self |= Self::splat(rhs);
}
}
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
self.0 ^= rhs.0;
}
}
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES>
where
$bits_ty: LanesAtMost64,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
*self ^= Self::splat(rhs);
@ -242,6 +317,7 @@ define_opaque_mask! {
///
/// The layout of this type is unspecified.
struct Mask8<const LANES: usize>(SimdMask8<LANES>);
@bits crate::SimdI8<LANES>
}
define_opaque_mask! {
@ -249,6 +325,7 @@ define_opaque_mask! {
///
/// The layout of this type is unspecified.
struct Mask16<const LANES: usize>(SimdMask16<LANES>);
@bits crate::SimdI16<LANES>
}
define_opaque_mask! {
@ -256,6 +333,7 @@ define_opaque_mask! {
///
/// The layout of this type is unspecified.
struct Mask32<const LANES: usize>(SimdMask32<LANES>);
@bits crate::SimdI32<LANES>
}
define_opaque_mask! {
@ -263,6 +341,7 @@ define_opaque_mask! {
///
/// The layout of this type is unspecified.
struct Mask64<const LANES: usize>(SimdMask64<LANES>);
@bits crate::SimdI64<LANES>
}
define_opaque_mask! {
@ -270,6 +349,7 @@ define_opaque_mask! {
///
/// The layout of this type is unspecified.
struct Mask128<const LANES: usize>(SimdMask128<LANES>);
@bits crate::SimdI128<LANES>
}
define_opaque_mask! {
@ -277,12 +357,17 @@ define_opaque_mask! {
///
/// The layout of this type is unspecified.
struct MaskSize<const LANES: usize>(SimdMaskSize<LANES>);
@bits crate::SimdIsize<LANES>
}
macro_rules! implement_mask_ops {
{ $($vector:ident => $mask:ident,)* } => {
{ $($vector:ident => $mask:ident ($inner_ty:ident),)* } => {
$(
impl<const LANES: usize> crate::$vector<LANES> where BitMask<LANES>: LanesAtMost64 {
impl<const LANES: usize> crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
crate::$inner_ty<LANES>: LanesAtMost64,
{
/// Test if each lane is equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_eq(&self, other: &Self) -> $mask<LANES> {
@ -324,22 +409,22 @@ macro_rules! implement_mask_ops {
}
implement_mask_ops! {
SimdI8 => Mask8,
SimdI16 => Mask16,
SimdI32 => Mask32,
SimdI64 => Mask64,
SimdI128 => Mask128,
SimdIsize => MaskSize,
SimdI8 => Mask8 (SimdI8),
SimdI16 => Mask16 (SimdI16),
SimdI32 => Mask32 (SimdI32),
SimdI64 => Mask64 (SimdI64),
SimdI128 => Mask128 (SimdI128),
SimdIsize => MaskSize (SimdIsize),
SimdU8 => Mask8,
SimdU16 => Mask16,
SimdU32 => Mask32,
SimdU64 => Mask64,
SimdU128 => Mask128,
SimdUsize => MaskSize,
SimdU8 => Mask8 (SimdI8),
SimdU16 => Mask16 (SimdI16),
SimdU32 => Mask32 (SimdI32),
SimdU64 => Mask64 (SimdI64),
SimdU128 => Mask128 (SimdI128),
SimdUsize => MaskSize (SimdIsize),
SimdF32 => Mask32,
SimdF64 => Mask64,
SimdF32 => Mask32 (SimdI32),
SimdF64 => Mask64 (SimdI64),
}
/// Vector of eight 8-bit masks

View file

@ -1,3 +1,5 @@
use crate::LanesAtMost64;
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
fn invalid_shift_rhs<T>(rhs: T) -> bool
where
@ -12,21 +14,30 @@ where
macro_rules! impl_ref_ops {
// binary op
{
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty {
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
where
$($bound:path: LanesAtMost64,)*
{
type Output = $output:ty;
$(#[$attrs:meta])*
fn $fn:ident($self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) -> Self::Output $body:tt
}
} => {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
where
$($bound: LanesAtMost64,)*
{
type Output = $output;
$(#[$attrs])*
fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body
}
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
$($bound: LanesAtMost64,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
$(#[$attrs])*
@ -35,7 +46,10 @@ macro_rules! impl_ref_ops {
}
}
impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type {
impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type
where
$($bound: LanesAtMost64,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
$(#[$attrs])*
@ -44,7 +58,10 @@ macro_rules! impl_ref_ops {
}
}
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type
where
$($bound: LanesAtMost64,)*
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
$(#[$attrs])*
@ -56,17 +73,26 @@ macro_rules! impl_ref_ops {
// binary assignment op
{
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty {
impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
where
$($bound:path: LanesAtMost64,)*
{
$(#[$attrs:meta])*
fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
}
} => {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type {
impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
where
$($bound: LanesAtMost64,)*
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
}
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type {
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
$($bound: LanesAtMost64,)*
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
core::ops::$trait::$fn($self_tok, *$rhs_arg)
@ -76,17 +102,26 @@ macro_rules! impl_ref_ops {
// unary op
{
impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty {
impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty
where
$($bound:path: LanesAtMost64,)*
{
type Output = $output:ty;
fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
}
} => {
impl<const $lanes: usize> core::ops::$trait for $type {
impl<const $lanes: usize> core::ops::$trait for $type
where
$($bound: LanesAtMost64,)*
{
type Output = $output;
fn $fn($self_tok) -> Self::Output $body
}
impl<const $lanes: usize> core::ops::$trait for &'_ $type {
impl<const $lanes: usize> core::ops::$trait for &'_ $type
where
$($bound: LanesAtMost64,)*
{
type Output = <$type as core::ops::$trait>::Output;
fn $fn($self_tok) -> Self::Output {
core::ops::$trait::$fn(*$self_tok)
@ -130,7 +165,10 @@ macro_rules! impl_op {
{ impl Not for $type:ident, $scalar:ty } => {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Not for crate::$type<LANES> {
impl<const LANES: usize> core::ops::Not for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
{
type Output = Self;
fn not(self) -> Self::Output {
self ^ Self::splat(!<$scalar>::default())
@ -141,7 +179,10 @@ macro_rules! impl_op {
{ impl Neg for $type:ident, $scalar:ty } => {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES> {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
{
type Output = Self;
fn neg(self) -> Self::Output {
Self::splat(0) - self
@ -152,7 +193,12 @@ macro_rules! impl_op {
{ impl Neg for $type:ident, $scalar:ty, @float } => {
impl_ref_ops! {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES> {
impl<const LANES: usize> core::ops::Neg for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
crate::SimdU32<LANES>: LanesAtMost64,
crate::SimdU64<LANES>: LanesAtMost64,
{
type Output = Self;
fn neg(self) -> Self::Output {
// FIXME: Replace this with fneg intrinsic once available.
@ -166,6 +212,7 @@ macro_rules! impl_op {
{ impl Index for $type:ident, $scalar:ty } => {
impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
where
Self: LanesAtMost64,
I: core::slice::SliceIndex<[$scalar]>,
{
type Output = I::Output;
@ -177,6 +224,7 @@ macro_rules! impl_op {
impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
where
Self: LanesAtMost64,
I: core::slice::SliceIndex<[$scalar]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
@ -189,7 +237,10 @@ macro_rules! impl_op {
// generic binary op with assignment when output is `Self`
{ @binary $type:ident, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<Self> for crate::$type<LANES> {
impl<const LANES: usize> core::ops::$trait<Self> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -202,7 +253,10 @@ macro_rules! impl_op {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<$scalar> for crate::$type<LANES> {
impl<const LANES: usize> core::ops::$trait<$scalar> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -213,7 +267,10 @@ macro_rules! impl_op {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::$trait<crate::$type<LANES>> for $scalar {
impl<const LANES: usize> core::ops::$trait<crate::$type<LANES>> for $scalar
where
crate::$type<LANES>: LanesAtMost64,
{
type Output = crate::$type<LANES>;
#[inline]
@ -224,7 +281,10 @@ macro_rules! impl_op {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::$assign_trait<Self> for crate::$type<LANES> {
impl<const LANES: usize> core::ops::$assign_trait<Self> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
{
#[inline]
fn $assign_trait_fn(&mut self, rhs: Self) {
unsafe {
@ -235,7 +295,10 @@ macro_rules! impl_op {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::$assign_trait<$scalar> for crate::$type<LANES> {
impl<const LANES: usize> core::ops::$assign_trait<$scalar> for crate::$type<LANES>
where
crate::$type<LANES>: LanesAtMost64,
{
#[inline]
fn $assign_trait_fn(&mut self, rhs: $scalar) {
core::ops::$assign_trait::$assign_trait_fn(self, Self::splat(rhs));
@ -278,7 +341,10 @@ macro_rules! impl_unsigned_int_ops {
// Integers panic on divide by 0
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Div<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -303,7 +369,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Div<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -323,7 +392,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Div<crate::$vector<LANES>> for $scalar {
impl<const LANES: usize> core::ops::Div<crate::$vector<LANES>> for $scalar
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = crate::$vector<LANES>;
#[inline]
@ -334,7 +406,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::DivAssign<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::DivAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn div_assign(&mut self, rhs: Self) {
*self = *self / rhs;
@ -343,7 +418,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::DivAssign<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::DivAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn div_assign(&mut self, rhs: $scalar) {
*self = *self / rhs;
@ -353,7 +431,10 @@ macro_rules! impl_unsigned_int_ops {
// remainder panics on zero divisor
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Rem<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -378,7 +459,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Rem<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -398,7 +482,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Rem<crate::$vector<LANES>> for $scalar {
impl<const LANES: usize> core::ops::Rem<crate::$vector<LANES>> for $scalar
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = crate::$vector<LANES>;
#[inline]
@ -409,7 +496,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::RemAssign<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::RemAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn rem_assign(&mut self, rhs: Self) {
*self = *self % rhs;
@ -418,7 +508,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::RemAssign<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::RemAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn rem_assign(&mut self, rhs: $scalar) {
*self = *self % rhs;
@ -428,7 +521,10 @@ macro_rules! impl_unsigned_int_ops {
// shifts panic on overflow
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shl<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Shl<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -447,7 +543,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shl<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Shl<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -463,7 +562,10 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShlAssign<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::ShlAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn shl_assign(&mut self, rhs: Self) {
*self = *self << rhs;
@ -472,7 +574,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShlAssign<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::ShlAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn shl_assign(&mut self, rhs: $scalar) {
*self = *self << rhs;
@ -481,7 +586,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shr<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Shr<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -500,7 +608,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::Shr<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::Shr<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
type Output = Self;
#[inline]
@ -516,7 +627,10 @@ macro_rules! impl_unsigned_int_ops {
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShrAssign<Self> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::ShrAssign<Self> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn shr_assign(&mut self, rhs: Self) {
*self = *self >> rhs;
@ -525,7 +639,10 @@ macro_rules! impl_unsigned_int_ops {
}
impl_ref_ops! {
impl<const LANES: usize> core::ops::ShrAssign<$scalar> for crate::$vector<LANES> {
impl<const LANES: usize> core::ops::ShrAssign<$scalar> for crate::$vector<LANES>
where
crate::$vector<LANES>: LanesAtMost64,
{
#[inline]
fn shr_assign(&mut self, rhs: $scalar) {
*self = *self >> rhs;

View file

@ -2,7 +2,7 @@
/// A SIMD vector of containing `LANES` `u8` values.
#[repr(simd)]
pub struct SimdU8<const LANES: usize>([u8; LANES]);
pub struct SimdU8<const LANES: usize>([u8; LANES]) where Self: crate::LanesAtMost64;
impl_integer_vector! { SimdU8, u8 }