From 644e8f89002cd19aa0b2ab65c08520fcb2d848bf Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Sun, 5 Mar 2023 15:41:21 +0000 Subject: [PATCH] Replace associated const hacks with inline consts Fixes #1368 --- .../crates/core_arch/src/aarch64/armclang.rs | 2 +- .../core_arch/src/aarch64/neon/generated.rs | 872 +++++++------- .../crates/core_arch/src/aarch64/neon/mod.rs | 154 +-- .../crates/core_arch/src/aarch64/prefetch.rs | 4 +- .../crates/core_arch/src/aarch64/tme.rs | 2 +- .../crates/core_arch/src/arm/armclang.rs | 2 +- .../stdarch/crates/core_arch/src/arm/mod.rs | 2 +- .../stdarch/crates/core_arch/src/arm/neon.rs | 88 +- .../src/arm_shared/neon/generated.rs | 1062 ++++++++--------- .../core_arch/src/arm_shared/neon/mod.rs | 100 +- .../stdarch/crates/core_arch/src/macros.rs | 116 +- .../stdarch/crates/core_arch/src/mips/msa.rs | 295 +++-- .../crates/core_arch/src/mips/msa/macros.rs | 31 - .../crates/core_arch/src/powerpc/vsx.rs | 2 +- .../crates/core_arch/src/riscv_shared/mod.rs | 4 +- .../crates/core_arch/src/wasm32/memory.rs | 4 +- .../crates/core_arch/src/wasm32/simd128.rs | 92 +- .../stdarch/crates/core_arch/src/x86/aes.rs | 2 +- .../stdarch/crates/core_arch/src/x86/avx.rs | 58 +- .../stdarch/crates/core_arch/src/x86/avx2.rs | 56 +- .../crates/core_arch/src/x86/avx512bw.rs | 152 +-- .../crates/core_arch/src/x86/avx512f.rs | 830 ++++++------- .../crates/core_arch/src/x86/avx512vbmi2.rs | 108 +- .../stdarch/crates/core_arch/src/x86/f16c.rs | 4 +- .../stdarch/crates/core_arch/src/x86/gfni.rs | 36 +- .../crates/core_arch/src/x86/macros.rs | 75 +- .../crates/core_arch/src/x86/pclmulqdq.rs | 2 +- .../stdarch/crates/core_arch/src/x86/rtm.rs | 2 +- .../stdarch/crates/core_arch/src/x86/sha.rs | 2 +- .../stdarch/crates/core_arch/src/x86/sse.rs | 2 +- .../stdarch/crates/core_arch/src/x86/sse2.rs | 36 +- .../stdarch/crates/core_arch/src/x86/sse41.rs | 32 +- .../stdarch/crates/core_arch/src/x86/sse42.rs | 28 +- .../stdarch/crates/core_arch/src/x86/ssse3.rs | 2 +- 
.../stdarch/crates/core_arch/src/x86/test.rs | 4 +- .../crates/core_arch/src/x86/vpclmulqdq.rs | 4 +- .../crates/core_arch/src/x86_64/avx.rs | 2 +- .../crates/core_arch/src/x86_64/avx2.rs | 2 +- .../crates/core_arch/src/x86_64/macros.rs | 28 +- .../crates/core_arch/src/x86_64/sse41.rs | 4 +- .../stdarch/crates/stdarch-gen/src/main.rs | 16 +- 41 files changed, 2095 insertions(+), 2224 deletions(-) delete mode 100644 library/stdarch/crates/core_arch/src/mips/msa/macros.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/armclang.rs b/library/stdarch/crates/core_arch/src/aarch64/armclang.rs index 7ad6ae50c1c0..9a608702adec 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/armclang.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/armclang.rs @@ -18,6 +18,6 @@ use stdarch_test::assert_instr; #[inline(always)] #[rustc_legacy_const_generics(0)] pub unsafe fn __breakpoint() { - static_assert_imm16!(VAL); + static_assert_uimm_bits!(VAL, 16); crate::arch::asm!("brk {}", const VAL); } diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 170e515f5acc..cb5413fa3743 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -2277,8 +2277,8 @@ pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 { #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_imm3!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -2301,8 +2301,8 @@ pub unsafe fn vcopy_lane_s8(a: int8x8_t, b: #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", 
since = "1.59.0")] pub unsafe fn vcopyq_laneq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_imm4!(LANE1); - static_assert_imm4!(LANE2); + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), @@ -2333,8 +2333,8 @@ pub unsafe fn vcopyq_laneq_s8(a: int8x16_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -2353,8 +2353,8 @@ pub unsafe fn vcopy_lane_s16(a: int16x4_t, b #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -2377,8 +2377,8 @@ pub unsafe fn vcopyq_laneq_s16(a: int16x8_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2395,8 +2395,8 @@ pub unsafe fn 
vcopy_lane_s32(a: int32x2_t, b #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -2415,8 +2415,8 @@ pub unsafe fn vcopyq_laneq_s32(a: int32x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2433,8 +2433,8 @@ pub unsafe fn vcopyq_laneq_s64(a: int64x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -2457,8 +2457,8 @@ pub unsafe fn vcopy_lane_u8(a: uint8x8_t, b: #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_imm4!(LANE1); - static_assert_imm4!(LANE2); + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, 
[0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), @@ -2489,8 +2489,8 @@ pub unsafe fn vcopyq_laneq_u8(a: uint8x16_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -2509,8 +2509,8 @@ pub unsafe fn vcopy_lane_u16(a: uint16x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -2533,8 +2533,8 @@ pub unsafe fn vcopyq_laneq_u16(a: uint16x8_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2551,8 +2551,8 @@ pub unsafe fn vcopy_lane_u32(a: uint32x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => 
simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -2571,8 +2571,8 @@ pub unsafe fn vcopyq_laneq_u32(a: uint32x4_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2589,8 +2589,8 @@ pub unsafe fn vcopyq_laneq_u64(a: uint64x2_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -2613,8 +2613,8 @@ pub unsafe fn vcopy_lane_p8(a: poly8x8_t, b: #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_imm4!(LANE1); - static_assert_imm4!(LANE2); + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 4); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), @@ -2645,8 +2645,8 @@ pub unsafe fn vcopyq_laneq_p8(a: poly8x16_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_imm2!(LANE1); - 
static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -2665,8 +2665,8 @@ pub unsafe fn vcopy_lane_p16(a: poly16x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_imm3!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 3); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -2689,8 +2689,8 @@ pub unsafe fn vcopyq_laneq_p16(a: poly16x8_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2707,8 +2707,8 @@ pub unsafe fn vcopyq_laneq_p64(a: poly64x2_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2725,8 +2725,8 @@ pub unsafe fn vcopy_lane_f32(a: float32x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - 
static_assert_imm2!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 2); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -2745,8 +2745,8 @@ pub unsafe fn vcopyq_laneq_f32(a: float32x4_ #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 1); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -2763,8 +2763,8 @@ pub unsafe fn vcopyq_laneq_f64(a: float64x2_ #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_s8(a: int8x8_t, b: int8x16_t) -> int8x8_t { - static_assert_imm3!(LANE1); - static_assert_imm4!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), @@ -2788,8 +2788,8 @@ pub unsafe fn vcopy_laneq_s8(a: int8x8_t, b: #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_imm2!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), @@ -2809,8 +2809,8 @@ pub unsafe fn vcopy_laneq_s16(a: int16x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vcopy_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_imm1!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), @@ -2828,8 +2828,8 @@ pub unsafe fn vcopy_laneq_s32(a: int32x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u8(a: uint8x8_t, b: uint8x16_t) -> uint8x8_t { - static_assert_imm3!(LANE1); - static_assert_imm4!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), @@ -2853,8 +2853,8 @@ pub unsafe fn vcopy_laneq_u8(a: uint8x8_t, b #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { - static_assert_imm2!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), @@ -2874,8 +2874,8 @@ pub unsafe fn vcopy_laneq_u16(a: uint16x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { - static_assert_imm1!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), @@ -2893,8 +2893,8 @@ pub unsafe fn vcopy_laneq_u32(a: uint32x2_t, #[rustc_legacy_const_generics(1, 3)] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_p8(a: poly8x8_t, b: poly8x16_t) -> poly8x8_t { - static_assert_imm3!(LANE1); - static_assert_imm4!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 4); let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), @@ -2918,8 +2918,8 @@ pub unsafe fn vcopy_laneq_p8(a: poly8x8_t, b #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_p16(a: poly16x4_t, b: poly16x8_t) -> poly16x4_t { - static_assert_imm2!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 3); let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), @@ -2939,8 +2939,8 @@ pub unsafe fn vcopy_laneq_p16(a: poly16x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopy_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_imm1!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 1); + static_assert_uimm_bits!(LANE2, 2); let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), @@ -2958,8 +2958,8 @@ pub unsafe fn vcopy_laneq_f32(a: float32x2_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s8(a: int8x16_t, b: int8x8_t) -> int8x16_t { - static_assert_imm4!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as 
u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), @@ -2991,8 +2991,8 @@ pub unsafe fn vcopyq_lane_s8(a: int8x16_t, b #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_imm3!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), @@ -3016,8 +3016,8 @@ pub unsafe fn vcopyq_lane_s16(a: int16x8_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_imm2!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), @@ -3037,8 +3037,8 @@ pub unsafe fn vcopyq_lane_s32(a: int32x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t { - static_assert_imm4!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), @@ -3070,8 +3070,8 @@ pub unsafe fn vcopyq_lane_u8(a: uint8x16_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { - static_assert_imm3!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 
3); + static_assert_uimm_bits!(LANE2, 2); let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), @@ -3095,8 +3095,8 @@ pub unsafe fn vcopyq_lane_u16(a: uint16x8_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { - static_assert_imm2!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), @@ -3116,8 +3116,8 @@ pub unsafe fn vcopyq_lane_u32(a: uint32x4_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_p8(a: poly8x16_t, b: poly8x8_t) -> poly8x16_t { - static_assert_imm4!(LANE1); - static_assert_imm3!(LANE2); + static_assert_uimm_bits!(LANE1, 4); + static_assert_uimm_bits!(LANE2, 3); let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), @@ -3149,8 +3149,8 @@ pub unsafe fn vcopyq_lane_p8(a: poly8x16_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_p16(a: poly16x8_t, b: poly16x4_t) -> poly16x8_t { - static_assert_imm3!(LANE1); - static_assert_imm2!(LANE2); + static_assert_uimm_bits!(LANE1, 3); + static_assert_uimm_bits!(LANE2, 2); let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), @@ -3174,8 +3174,8 @@ pub unsafe fn vcopyq_lane_p16(a: poly16x8_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vcopyq_lane_s64(a: int64x2_t, b: int64x1_t) -> int64x2_t { - static_assert_imm1!(LANE1); - static_assert!(LANE2 : i32 where LANE2 == 0); + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), @@ -3193,8 +3193,8 @@ pub unsafe fn vcopyq_lane_s64(a: int64x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_u64(a: uint64x2_t, b: uint64x1_t) -> uint64x2_t { - static_assert_imm1!(LANE1); - static_assert!(LANE2 : i32 where LANE2 == 0); + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), @@ -3212,8 +3212,8 @@ pub unsafe fn vcopyq_lane_u64(a: uint64x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_p64(a: poly64x2_t, b: poly64x1_t) -> poly64x2_t { - static_assert_imm1!(LANE1); - static_assert!(LANE2 : i32 where LANE2 == 0); + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), @@ -3231,8 +3231,8 @@ pub unsafe fn vcopyq_lane_p64(a: poly64x2_t, #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcopyq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_imm2!(LANE1); - static_assert_imm1!(LANE2); + static_assert_uimm_bits!(LANE1, 2); + static_assert_uimm_bits!(LANE2, 1); let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), @@ -3252,8 +3252,8 @@ pub unsafe fn vcopyq_lane_f32(a: float32x4_t #[rustc_legacy_const_generics(1, 3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vcopyq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert_imm1!(LANE1); - static_assert!(LANE2 : i32 where LANE2 == 0); + static_assert_uimm_bits!(LANE1, 1); + static_assert!(LANE2 == 0); let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), @@ -3409,7 +3409,7 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64")] @@ -3427,7 +3427,7 @@ pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64")] @@ -3445,7 +3445,7 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32")] @@ -3463,7 +3463,7 @@ pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + 
static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64")] @@ -3481,7 +3481,7 @@ pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64")] @@ -3499,7 +3499,7 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64")] @@ -3517,7 +3517,7 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32")] @@ -3535,7 +3535,7 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64")] @@ -3553,7 +3553,7 @@ pub unsafe 
fn vcvtd_n_f64_u64(a: u64) -> f64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64")] @@ -3571,7 +3571,7 @@ pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64")] @@ -3589,7 +3589,7 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32")] @@ -3607,7 +3607,7 @@ pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64")] @@ -3625,7 +3625,7 @@ pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { - static_assert!(N : 
i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64")] @@ -3643,7 +3643,7 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64")] @@ -3661,7 +3661,7 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32")] @@ -3679,7 +3679,7 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64")] @@ -4617,7 +4617,7 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -4630,7 +4630,7 @@ pub unsafe fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { #[rustc_legacy_const_generics(1)] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -4643,7 +4643,7 @@ pub unsafe fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -4656,7 +4656,7 @@ pub unsafe fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -4669,7 +4669,7 @@ pub unsafe fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -4682,7 +4682,7 @@ pub unsafe fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -4695,7 +4695,7 @@ pub unsafe fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); transmute::(simd_extract(a, N as u32)) } @@ -4708,7 +4708,7 @@ pub unsafe fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); transmute::(simd_extract(a, N as u32)) } @@ -4721,7 +4721,7 @@ pub unsafe fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(a, N as u32) } @@ -4734,7 +4734,7 @@ pub unsafe fn vdupb_lane_s8(a: int8x8_t) -> i8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_extract(a, N as u32) } @@ -4747,7 +4747,7 @@ pub unsafe fn vdupb_laneq_s8(a: int8x16_t) -> i8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_extract(a, N as u32) } @@ -4760,7 +4760,7 @@ pub unsafe fn vduph_lane_s16(a: int16x4_t) -> i16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(a, N as u32) } @@ -4773,7 +4773,7 @@ pub unsafe fn vduph_laneq_s16(a: int16x8_t) -> i16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_extract(a, N as u32) } @@ -4786,7 +4786,7 @@ pub unsafe fn vdups_lane_s32(a: int32x2_t) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { - static_assert_imm2!(N); + 
static_assert_uimm_bits!(N, 2); simd_extract(a, N as u32) } @@ -4799,7 +4799,7 @@ pub unsafe fn vdups_laneq_s32(a: int32x4_t) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_extract(a, N as u32) } @@ -4812,7 +4812,7 @@ pub unsafe fn vdupd_lane_s64(a: int64x1_t) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_extract(a, N as u32) } @@ -4825,7 +4825,7 @@ pub unsafe fn vdupd_laneq_s64(a: int64x2_t) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(a, N as u32) } @@ -4838,7 +4838,7 @@ pub unsafe fn vdupb_lane_u8(a: uint8x8_t) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_extract(a, N as u32) } @@ -4851,7 +4851,7 @@ pub unsafe fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_extract(a, N as u32) } @@ -4864,7 +4864,7 @@ pub unsafe fn vduph_lane_u16(a: uint16x4_t) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(a, N as u32) } @@ -4877,7 +4877,7 @@ pub unsafe fn vduph_laneq_u16(a: uint16x8_t) -> u16 { 
#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_extract(a, N as u32) } @@ -4890,7 +4890,7 @@ pub unsafe fn vdups_lane_u32(a: uint32x2_t) -> u32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_extract(a, N as u32) } @@ -4903,7 +4903,7 @@ pub unsafe fn vdups_laneq_u32(a: uint32x4_t) -> u32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_extract(a, N as u32) } @@ -4916,7 +4916,7 @@ pub unsafe fn vdupd_lane_u64(a: uint64x1_t) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_extract(a, N as u32) } @@ -4929,7 +4929,7 @@ pub unsafe fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(a, N as u32) } @@ -4942,7 +4942,7 @@ pub unsafe fn vdupb_lane_p8(a: poly8x8_t) -> p8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_extract(a, N as u32) } @@ -4955,7 +4955,7 @@ pub unsafe fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { - 
static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_extract(a, N as u32) } @@ -4968,7 +4968,7 @@ pub unsafe fn vduph_lane_p16(a: poly16x4_t) -> p16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(a, N as u32) } @@ -4981,7 +4981,7 @@ pub unsafe fn vduph_laneq_p16(a: poly16x8_t) -> p16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_extract(a, N as u32) } @@ -4994,7 +4994,7 @@ pub unsafe fn vdups_lane_f32(a: float32x2_t) -> f32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_extract(a, N as u32) } @@ -5007,7 +5007,7 @@ pub unsafe fn vdups_laneq_f32(a: float32x4_t) -> f32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_extract(a, N as u32) } @@ -5020,7 +5020,7 @@ pub unsafe fn vdupd_lane_f64(a: float64x1_t) -> f64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_extract(a, N as u32) } @@ -5033,7 +5033,7 @@ pub unsafe fn vdupd_laneq_f64(a: float64x2_t) -> f64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => 
simd_shuffle!(a, b, [1, 2]), @@ -5050,7 +5050,7 @@ pub unsafe fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_ #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), @@ -5211,7 +5211,7 @@ pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2 #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5224,7 +5224,7 @@ pub unsafe fn vmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5237,7 +5237,7 @@ pub unsafe fn vmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5250,7 +5250,7 @@ pub unsafe fn vmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5263,7 +5263,7 @@ pub unsafe fn vmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5276,7 +5276,7 @@ pub unsafe fn vmlal_high_lane_u16(a: uint32x4_t, b: uint16x8_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5289,7 +5289,7 @@ pub unsafe fn vmlal_high_laneq_u16(a: uint32x4_t, b: uint16x8_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_lane_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5302,7 +5302,7 @@ pub unsafe fn vmlal_high_lane_u32(a: uint64x2_t, b: uint32x4_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlal_high_laneq_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - 
static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5459,7 +5459,7 @@ pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2 #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5472,7 +5472,7 @@ pub unsafe fn vmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5485,7 +5485,7 @@ pub unsafe fn vmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5498,7 +5498,7 @@ pub unsafe fn vmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE 
as u32])) } @@ -5511,7 +5511,7 @@ pub unsafe fn vmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5524,7 +5524,7 @@ pub unsafe fn vmlsl_high_lane_u16(a: uint32x4_t, b: uint16x8_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5537,7 +5537,7 @@ pub unsafe fn vmlsl_high_laneq_u16(a: uint32x4_t, b: uint16x8_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_lane_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5550,7 +5550,7 @@ pub unsafe fn vmlsl_high_lane_u32(a: uint64x2_t, b: uint32x4_t, #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmlsl_high_laneq_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -6742,7 +6742,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8")] @@ -6760,7 +6760,7 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8")] @@ -6778,7 +6778,7 @@ pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> i #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8")] @@ -6796,7 +6796,7 @@ pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vld2_lane_s64::(transmute(a), transmute(b))) } @@ -6809,7 +6809,7 @@ pub unsafe fn vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld2q_lane_s64::(transmute(a), 
transmute(b))) } @@ -6822,7 +6822,7 @@ pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vld2q_lane_s8::(transmute(a), transmute(b))) } @@ -6835,7 +6835,7 @@ pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vld2_lane_s64::(transmute(a), transmute(b))) } @@ -6848,7 +6848,7 @@ pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld2q_lane_s64::(transmute(a), transmute(b))) } @@ -6861,7 +6861,7 @@ pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vld2q_lane_s8::(transmute(a), transmute(b))) } @@ -6874,7 +6874,7 @@ pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> p #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.neon.ld2lane.v1f64.p0i8")] @@ -6892,7 +6892,7 @@ pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) -> float64x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8")] @@ -7050,7 +7050,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8")] @@ -7068,7 +7068,7 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8")] @@ -7086,7 +7086,7 @@ pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> i #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8")] @@ -7104,7 +7104,7 @@ pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> #[rustc_legacy_const_generics(2)] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vld3_lane_s64::(transmute(a), transmute(b))) } @@ -7117,7 +7117,7 @@ pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld3q_lane_s64::(transmute(a), transmute(b))) } @@ -7130,7 +7130,7 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vld3q_lane_s8::(transmute(a), transmute(b))) } @@ -7143,7 +7143,7 @@ pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> p #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vld3q_lane_s8::(transmute(a), transmute(b))) } @@ -7156,7 +7156,7 @@ pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vld3_lane_s64::(transmute(a), transmute(b))) } @@ -7169,7 +7169,7 @@ pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_u64(a: 
*const u64, b: uint64x2x3_t) -> uint64x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld3q_lane_s64::(transmute(a), transmute(b))) } @@ -7182,7 +7182,7 @@ pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8")] @@ -7200,7 +7200,7 @@ pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) -> float64x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8")] @@ -7358,7 +7358,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8")] @@ -7376,7 +7376,7 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.neon.ld4lane.v1i64.p0i8")] @@ -7394,7 +7394,7 @@ pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> i #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8")] @@ -7412,7 +7412,7 @@ pub unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vld4_lane_s64::(transmute(a), transmute(b))) } @@ -7425,7 +7425,7 @@ pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld4q_lane_s64::(transmute(a), transmute(b))) } @@ -7438,7 +7438,7 @@ pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vld4q_lane_s8::(transmute(a), transmute(b))) } @@ -7451,7 +7451,7 @@ pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> p #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vld4q_lane_s8::(transmute(a), 
transmute(b))) } @@ -7464,7 +7464,7 @@ pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vld4_lane_s64::(transmute(a), transmute(b))) } @@ -7477,7 +7477,7 @@ pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld4q_lane_s64::(transmute(a), transmute(b))) } @@ -7490,7 +7490,7 @@ pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8")] @@ -7508,7 +7508,7 @@ pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) -> float64x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8")] @@ -7526,7 +7526,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { - static_assert!(LANE : i32 where LANE == 0); + 
static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); } @@ -7539,7 +7539,7 @@ pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -7718,7 +7718,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8")] @@ -7736,7 +7736,7 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8")] @@ -7754,7 +7754,7 @@ pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8")] @@ -7772,7 +7772,7 @@ pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { - static_assert_imm4!(LANE); + 
static_assert_uimm_bits!(LANE, 4); transmute(vst2q_lane_s8::(transmute(a), transmute(b))) } @@ -7785,7 +7785,7 @@ pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vst2_lane_s64::(transmute(a), transmute(b))) } @@ -7798,7 +7798,7 @@ pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst2q_lane_s64::(transmute(a), transmute(b))) } @@ -7811,7 +7811,7 @@ pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vst2q_lane_s8::(transmute(a), transmute(b))) } @@ -7824,7 +7824,7 @@ pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vst2_lane_s64::(transmute(a), transmute(b))) } @@ -7837,7 +7837,7 @@ pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst2q_lane_s64::(transmute(a), transmute(b))) } @@ -7850,7 +7850,7 @@ pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { 
#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8")] @@ -7868,7 +7868,7 @@ pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8")] @@ -7956,7 +7956,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8")] @@ -7974,7 +7974,7 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8")] @@ -7992,7 +7992,7 @@ pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] 
extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8")] @@ -8010,7 +8010,7 @@ pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vst3q_lane_s8::(transmute(a), transmute(b))) } @@ -8023,7 +8023,7 @@ pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vst3_lane_s64::(transmute(a), transmute(b))) } @@ -8036,7 +8036,7 @@ pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst3q_lane_s64::(transmute(a), transmute(b))) } @@ -8049,7 +8049,7 @@ pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vst3q_lane_s8::(transmute(a), transmute(b))) } @@ -8062,7 +8062,7 @@ pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vst3_lane_s64::(transmute(a), transmute(b))) } @@ -8075,7 +8075,7 @@ pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { 
#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst3q_lane_s64::(transmute(a), transmute(b))) } @@ -8088,7 +8088,7 @@ pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8")] @@ -8106,7 +8106,7 @@ pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8")] @@ -8194,7 +8194,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8")] @@ -8212,7 +8212,7 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.neon.st4lane.v1i64.p0i8")] @@ -8230,7 +8230,7 @@ pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8")] @@ -8248,7 +8248,7 @@ pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vst4q_lane_s8::(transmute(a), transmute(b))) } @@ -8261,7 +8261,7 @@ pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vst4_lane_s64::(transmute(a), transmute(b))) } @@ -8274,7 +8274,7 @@ pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst4q_lane_s64::(transmute(a), transmute(b))) } @@ -8287,7 +8287,7 @@ pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); transmute(vst4q_lane_s8::(transmute(a), transmute(b))) } @@ -8300,7 +8300,7 @@ pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { 
#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); transmute(vst4_lane_s64::(transmute(a), transmute(b))) } @@ -8313,7 +8313,7 @@ pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst4q_lane_s64::(transmute(a), transmute(b))) } @@ -8326,7 +8326,7 @@ pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8")] @@ -8344,7 +8344,7 @@ pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8")] @@ -8406,7 +8406,7 @@ pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_mul(a, transmute::(simd_extract(b, LANE as u32))) } @@ -8419,7 +8419,7 @@ pub unsafe fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) 
-> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, transmute::(simd_extract(b, LANE as u32))) } @@ -8432,7 +8432,7 @@ pub unsafe fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -8445,7 +8445,7 @@ pub unsafe fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -8458,7 +8458,7 @@ pub unsafe fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let b: f32 = simd_extract(b, LANE as u32); a * b } @@ -8472,7 +8472,7 @@ pub unsafe fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let b: f32 = simd_extract(b, LANE as u32); a * b } @@ -8486,7 +8486,7 @@ pub unsafe fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuld_lane_f64(a: f64, b: 
float64x1_t) -> f64 { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let b: f64 = simd_extract(b, LANE as u32); a * b } @@ -8500,7 +8500,7 @@ pub unsafe fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let b: f64 = simd_extract(b, LANE as u32); a * b } @@ -8676,7 +8676,7 @@ pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8689,7 +8689,7 @@ pub unsafe fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8702,7 +8702,7 @@ pub unsafe fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8715,7 +8715,7 @@ pub unsafe fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8728,7 +8728,7 @@ pub unsafe fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8741,7 +8741,7 @@ pub unsafe fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8754,7 +8754,7 @@ pub unsafe fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8767,7 +8767,7 @@ pub unsafe fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_high_u32(a, simd_shuffle!(b, b, 
[LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8844,7 +8844,7 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vmulx_f64(a, transmute::(simd_extract(b, LANE as u32))) } @@ -8857,7 +8857,7 @@ pub unsafe fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmulx_f64(a, transmute::(simd_extract(b, LANE as u32))) } @@ -8870,7 +8870,7 @@ pub unsafe fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -8883,7 +8883,7 @@ pub unsafe fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -8896,7 +8896,7 @@ pub unsafe fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as 
u32, LANE as u32])) } @@ -8909,7 +8909,7 @@ pub unsafe fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -8922,7 +8922,7 @@ pub unsafe fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -8935,7 +8935,7 @@ pub unsafe fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -8980,7 +8980,7 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmulxs_f32(a, simd_extract(b, LANE as u32)) } @@ -8993,7 +8993,7 @@ pub unsafe fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmulxs_f32(a, simd_extract(b, LANE as u32)) } @@ -9006,7 +9006,7 @@ pub unsafe fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { 
#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vmulxd_f64(a, simd_extract(b, LANE as u32)) } @@ -9019,7 +9019,7 @@ pub unsafe fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmulxd_f64(a, simd_extract(b, LANE as u32)) } @@ -9086,7 +9086,7 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) } @@ -9099,7 +9099,7 @@ pub unsafe fn vfma_lane_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) } @@ -9112,7 +9112,7 @@ pub unsafe fn vfma_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) } @@ -9125,7 +9125,7 @@ pub unsafe fn vfmaq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vfmaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) } @@ -9138,7 +9138,7 @@ pub unsafe fn vfmaq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32))) } @@ -9151,7 +9151,7 @@ pub unsafe fn vfma_lane_f64(a: float64x1_t, b: float64x1_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfma_laneq_f64(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32))) } @@ -9164,7 +9164,7 @@ pub unsafe fn vfma_laneq_f64(a: float64x1_t, b: float64x1_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) } @@ -9177,7 +9177,7 @@ pub unsafe fn vfmaq_lane_f64(a: float64x2_t, b: float64x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmaq_laneq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) } @@ -9195,7 +9195,7 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")] fn 
vfmas_lane_f32_(a: f32, b: f32, c: f32) -> f32; } - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: f32 = simd_extract(c, LANE as u32); vfmas_lane_f32_(b, c, a) } @@ -9214,7 +9214,7 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")] fn vfmas_laneq_f32_(a: f32, b: f32, c: f32) -> f32; } - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: f32 = simd_extract(c, LANE as u32); vfmas_laneq_f32_(b, c, a) } @@ -9233,7 +9233,7 @@ pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")] fn vfmad_lane_f64_(a: f64, b: f64, c: f64) -> f64; } - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: f64 = simd_extract(c, LANE as u32); vfmad_lane_f64_(b, c, a) } @@ -9252,7 +9252,7 @@ pub unsafe fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")] fn vfmad_laneq_f64_(a: f64, b: f64, c: f64) -> f64; } - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: f64 = simd_extract(c, LANE as u32); vfmad_laneq_f64_(b, c, a) } @@ -9312,7 +9312,7 @@ pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32))) } @@ -9325,7 +9325,7 @@ pub unsafe fn vfms_lane_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vfms_f32(a, b, 
vdup_n_f32(simd_extract(c, LANE as u32))) } @@ -9338,7 +9338,7 @@ pub unsafe fn vfms_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) } @@ -9351,7 +9351,7 @@ pub unsafe fn vfmsq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32))) } @@ -9364,7 +9364,7 @@ pub unsafe fn vfmsq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32))) } @@ -9377,7 +9377,7 @@ pub unsafe fn vfms_lane_f64(a: float64x1_t, b: float64x1_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfms_laneq_f64(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32))) } @@ -9390,7 +9390,7 @@ pub unsafe fn vfms_laneq_f64(a: float64x1_t, b: float64x1_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); vfmsq_f64(a, 
b, vdupq_n_f64(simd_extract(c, LANE as u32))) } @@ -9403,7 +9403,7 @@ pub unsafe fn vfmsq_lane_f64(a: float64x2_t, b: float64x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vfmsq_laneq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32))) } @@ -10301,7 +10301,7 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_f32(a, b, c) } @@ -10314,7 +10314,7 @@ pub unsafe fn vcmla_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_f32(a, b, c) } @@ -10327,7 +10327,7 @@ pub unsafe fn vcmla_laneq_f32(a: float32x2_t, b: float32x2_t, c #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_f32(a, b, c) } @@ -10340,7 +10340,7 @@ pub unsafe fn vcmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub 
unsafe fn vcmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_f32(a, b, c) } @@ -10353,7 +10353,7 @@ pub unsafe fn vcmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_rot90_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot90_f32(a, b, c) } @@ -10366,7 +10366,7 @@ pub unsafe fn vcmla_rot90_lane_f32(a: float32x2_t, b: float32x2 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_rot90_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot90_f32(a, b, c) } @@ -10379,7 +10379,7 @@ pub unsafe fn vcmla_rot90_laneq_f32(a: float32x2_t, b: float32x #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_rot90_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_rot90_f32(a, b, c) } @@ -10392,7 +10392,7 @@ pub unsafe fn vcmlaq_rot90_lane_f32(a: float32x4_t, b: float32x #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_rot90_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm1!(LANE); + 
static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_rot90_f32(a, b, c) } @@ -10405,7 +10405,7 @@ pub unsafe fn vcmlaq_rot90_laneq_f32(a: float32x4_t, b: float32 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_rot180_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot180_f32(a, b, c) } @@ -10418,7 +10418,7 @@ pub unsafe fn vcmla_rot180_lane_f32(a: float32x2_t, b: float32x #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_rot180_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot180_f32(a, b, c) } @@ -10431,7 +10431,7 @@ pub unsafe fn vcmla_rot180_laneq_f32(a: float32x2_t, b: float32 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_rot180_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_rot180_f32(a, b, c) } @@ -10444,7 +10444,7 @@ pub unsafe fn vcmlaq_rot180_lane_f32(a: float32x4_t, b: float32 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_rot180_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as 
u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_rot180_f32(a, b, c) } @@ -10457,7 +10457,7 @@ pub unsafe fn vcmlaq_rot180_laneq_f32(a: float32x4_t, b: float3 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_rot270_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot270_f32(a, b, c) } @@ -10470,7 +10470,7 @@ pub unsafe fn vcmla_rot270_lane_f32(a: float32x2_t, b: float32x #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmla_rot270_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); vcmla_rot270_f32(a, b, c) } @@ -10483,7 +10483,7 @@ pub unsafe fn vcmla_rot270_laneq_f32(a: float32x2_t, b: float32 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_rot270_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_rot270_f32(a, b, c) } @@ -10496,7 +10496,7 @@ pub unsafe fn vcmlaq_rot270_lane_f32(a: float32x4_t, b: float32 #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vcmlaq_rot270_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); vcmlaq_rot270_f32(a, b, c) } @@ -10569,7 +10569,7 @@ pub 
unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 #[cfg_attr(test, assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdot_s32(a, b, c) } @@ -10582,7 +10582,7 @@ pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x #[cfg_attr(test, assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdot_s32(a, b, c) } @@ -10595,7 +10595,7 @@ pub unsafe fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8 #[cfg_attr(test, assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdotq_s32(a, b, c) } @@ -10608,7 +10608,7 @@ pub unsafe fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int #[cfg_attr(test, assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdotq_laneq_s32(a: int32x4_t, b: 
int8x16_t, c: int8x16_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdotq_s32(a, b, c) } @@ -10621,7 +10621,7 @@ pub unsafe fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: in #[cfg_attr(test, assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdot_u32(a, b, c) } @@ -10634,7 +10634,7 @@ pub unsafe fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uin #[cfg_attr(test, assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdot_u32(a, b, c) } @@ -10647,7 +10647,7 @@ pub unsafe fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: ui #[cfg_attr(test, assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as 
u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdotq_u32(a, b, c) } @@ -10660,7 +10660,7 @@ pub unsafe fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: u #[cfg_attr(test, assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]); vdotq_u32(a, b, c) } @@ -11333,7 +11333,7 @@ pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) } @@ -11347,7 +11347,7 @@ pub unsafe fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) } @@ -11361,7 +11361,7 @@ pub unsafe fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let b: i16 = simd_extract(b, N as u32); vqdmullh_s16(a, b) } @@ -11375,7 +11375,7 @@ pub unsafe fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let b: i16 = simd_extract(b, N as u32); vqdmullh_s16(a, b) } @@ -11389,7 +11389,7 @@ pub unsafe fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); let b: i32 = simd_extract(b, N as u32); vqdmulls_s32(a, b) } @@ -11403,7 +11403,7 @@ pub unsafe fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let b: i32 = simd_extract(b, N as u32); vqdmulls_s32(a, b) } @@ -11417,7 +11417,7 @@ pub unsafe fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) @@ -11432,7 +11432,7 @@ pub unsafe fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: 
int32x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) @@ -11447,7 +11447,7 @@ pub unsafe fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) @@ -11462,7 +11462,7 @@ pub unsafe fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) @@ -11521,7 +11521,7 @@ pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqaddq_s32(a, vqdmull_laneq_s16::(b, c)) } @@ -11534,7 +11534,7 @@ pub unsafe fn vqdmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqaddq_s64(a, vqdmull_laneq_s32::(b, c)) } @@ -11547,7 +11547,7 @@ pub unsafe fn vqdmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int 
#[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqaddq_s32(a, vqdmull_high_lane_s16::(b, c)) } @@ -11560,7 +11560,7 @@ pub unsafe fn vqdmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqaddq_s32(a, vqdmull_high_laneq_s16::(b, c)) } @@ -11573,7 +11573,7 @@ pub unsafe fn vqdmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); vqaddq_s64(a, vqdmull_high_lane_s32::(b, c)) } @@ -11586,7 +11586,7 @@ pub unsafe fn vqdmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqaddq_s64(a, vqdmull_high_laneq_s32::(b, c)) } @@ -11623,7 +11623,7 @@ pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmlalh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -11636,7 +11636,7 @@ pub unsafe fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqdmlalh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -11649,7 +11649,7 @@ pub unsafe fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqdmlals_s32(a, b, simd_extract(c, LANE as u32)) } @@ -11662,7 +11662,7 @@ pub unsafe fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmlals_s32(a, b, simd_extract(c, LANE as u32)) } @@ -11719,7 +11719,7 @@ pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_ #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqsubq_s32(a, vqdmull_laneq_s16::(b, c)) } @@ -11732,7 +11732,7 @@ pub unsafe fn vqdmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqsubq_s64(a, vqdmull_laneq_s32::(b, c)) } @@ -11745,7 +11745,7 @@ pub unsafe fn vqdmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(N); + 
static_assert_uimm_bits!(N, 2); vqsubq_s32(a, vqdmull_high_lane_s16::(b, c)) } @@ -11758,7 +11758,7 @@ pub unsafe fn vqdmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqsubq_s32(a, vqdmull_high_laneq_s16::(b, c)) } @@ -11771,7 +11771,7 @@ pub unsafe fn vqdmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); vqsubq_s64(a, vqdmull_high_lane_s32::(b, c)) } @@ -11784,7 +11784,7 @@ pub unsafe fn vqdmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqsubq_s64(a, vqdmull_high_laneq_s32::(b, c)) } @@ -11821,7 +11821,7 @@ pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmlslh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -11834,7 +11834,7 @@ pub unsafe fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqdmlslh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -11847,7 +11847,7 @@ pub unsafe fn 
vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqdmlsls_s32(a, b, simd_extract(c, LANE as u32)) } @@ -11860,7 +11860,7 @@ pub unsafe fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) - #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmlsls_s32(a, b, simd_extract(c, LANE as u32)) } @@ -11899,7 +11899,7 @@ pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let b: i16 = simd_extract(b, N as u32); vqdmulhh_s16(a, b) } @@ -11913,7 +11913,7 @@ pub unsafe fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let b: i16 = simd_extract(b, N as u32); vqdmulhh_s16(a, b) } @@ -11927,7 +11927,7 @@ pub unsafe fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); let b: i32 = simd_extract(b, N as u32); vqdmulhs_s32(a, b) } @@ -11941,7 +11941,7 @@ pub unsafe fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: 
int32x4_t) -> i32 { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let b: i32 = simd_extract(b, N as u32); vqdmulhs_s32(a, b) } @@ -11955,7 +11955,7 @@ pub unsafe fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32))) } @@ -11968,7 +11968,7 @@ pub unsafe fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> i #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32))) } @@ -11981,7 +11981,7 @@ pub unsafe fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32))) } @@ -11994,7 +11994,7 @@ pub unsafe fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> i #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32))) } @@ -12237,7 +12237,7 @@ pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqrdmulhh_s16(a, simd_extract(b, LANE as 
u32)) } @@ -12250,7 +12250,7 @@ pub unsafe fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqrdmulhh_s16(a, simd_extract(b, LANE as u32)) } @@ -12263,7 +12263,7 @@ pub unsafe fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqrdmulhs_s32(a, simd_extract(b, LANE as u32)) } @@ -12276,7 +12276,7 @@ pub unsafe fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqrdmulhs_s32(a, simd_extract(b, LANE as u32)) } @@ -12381,7 +12381,7 @@ pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlah_s16(a, b, c) } @@ -12395,7 +12395,7 @@ pub unsafe fn vqrdmlah_lane_s16(a: int16x4_t, b: int16x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlah_s16(a, b, c) } @@ -12409,7 
+12409,7 @@ pub unsafe fn vqrdmlah_laneq_s16(a: int16x4_t, b: int16x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlahq_s16(a, b, c) } @@ -12423,7 +12423,7 @@ pub unsafe fn vqrdmlahq_lane_s16(a: int16x8_t, b: int16x8_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlahq_s16(a, b, c) } @@ -12437,7 +12437,7 @@ pub unsafe fn vqrdmlahq_laneq_s16(a: int16x8_t, b: int16x8_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlah_s32(a, b, c) } @@ -12451,7 +12451,7 @@ pub unsafe fn vqrdmlah_lane_s32(a: int32x2_t, b: int32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlah_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlah_s32(a, b, c) } @@ -12465,7 +12465,7 @@ pub unsafe fn vqrdmlah_laneq_s32(a: int32x2_t, b: int32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = 
"rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlahq_s32(a, b, c) } @@ -12479,7 +12479,7 @@ pub unsafe fn vqrdmlahq_lane_s32(a: int32x4_t, b: int32x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlahq_s32(a, b, c) } @@ -12493,7 +12493,7 @@ pub unsafe fn vqrdmlahq_laneq_s32(a: int32x4_t, b: int32x4_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqrdmlahh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -12506,7 +12506,7 @@ pub unsafe fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqrdmlahh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -12519,7 +12519,7 @@ pub unsafe fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqrdmlahs_s32(a, b, simd_extract(c, LANE as u32)) } @@ -12532,7 +12532,7 @@ pub unsafe fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) #[rustc_legacy_const_generics(3)] 
#[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqrdmlahs_s32(a, b, simd_extract(c, LANE as u32)) } @@ -12637,7 +12637,7 @@ pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlsh_s16(a, b, c) } @@ -12651,7 +12651,7 @@ pub unsafe fn vqrdmlsh_lane_s16(a: int16x4_t, b: int16x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlsh_s16(a, b, c) } @@ -12665,7 +12665,7 @@ pub unsafe fn vqrdmlsh_laneq_s16(a: int16x4_t, b: int16x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlshq_s16(a, b, c) } @@ -12679,7 +12679,7 @@ pub unsafe fn vqrdmlshq_lane_s16(a: int16x8_t, b: int16x8_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); let c: 
int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlshq_s16(a, b, c) } @@ -12693,7 +12693,7 @@ pub unsafe fn vqrdmlshq_laneq_s16(a: int16x8_t, b: int16x8_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlsh_s32(a, b, c) } @@ -12707,7 +12707,7 @@ pub unsafe fn vqrdmlsh_lane_s32(a: int32x2_t, b: int32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlsh_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); vqrdmlsh_s32(a, b, c) } @@ -12721,7 +12721,7 @@ pub unsafe fn vqrdmlsh_laneq_s32(a: int32x2_t, b: int32x2_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlshq_s32(a, b, c) } @@ -12735,7 +12735,7 @@ pub unsafe fn vqrdmlshq_lane_s32(a: int32x4_t, b: int32x4_t, c: #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmlshq_s32(a, b, c) } @@ -12749,7 +12749,7 @@ pub unsafe fn vqrdmlshq_laneq_s32(a: int32x4_t, b: 
int32x4_t, c #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqrdmlshh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -12762,7 +12762,7 @@ pub unsafe fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqrdmlshh_s16(a, b, simd_extract(c, LANE as u32)) } @@ -12775,7 +12775,7 @@ pub unsafe fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vqrdmlshs_s32(a, b, simd_extract(c, LANE as u32)) } @@ -12788,7 +12788,7 @@ pub unsafe fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) #[rustc_legacy_const_generics(3)] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqrdmlshs_s32(a, b, simd_extract(c, LANE as u32)) } @@ -12917,7 +12917,7 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); let a: int16x8_t = vdupq_n_s16(a); simd_extract(vqrshrn_n_s16::(a), 0) } @@ -12931,7 +12931,7 @@ pub unsafe fn vqrshrnh_n_s16(a: i16) -> i8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { - static_assert!(N : i32 
where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let a: int32x4_t = vdupq_n_s32(a); simd_extract(vqrshrn_n_s32::(a), 0) } @@ -12945,7 +12945,7 @@ pub unsafe fn vqrshrns_n_s32(a: i32) -> i16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); let a: int64x2_t = vdupq_n_s64(a); simd_extract(vqrshrn_n_s64::(a), 0) } @@ -12959,7 +12959,7 @@ pub unsafe fn vqrshrnd_n_s64(a: i64) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vqrshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -12972,7 +12972,7 @@ pub unsafe fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -12985,7 +12985,7 @@ pub unsafe fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> in #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqrshrn_n_s64::(b), [0, 1, 2, 3]) } @@ -12998,7 +12998,7 @@ pub unsafe fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> in #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { - static_assert!(N : i32 where N >= 1 && 
N <= 8); + static_assert!(N >= 1 && N <= 8); let a: uint16x8_t = vdupq_n_u16(a); simd_extract(vqrshrn_n_u16::(a), 0) } @@ -13012,7 +13012,7 @@ pub unsafe fn vqrshrnh_n_u16(a: u16) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let a: uint32x4_t = vdupq_n_u32(a); simd_extract(vqrshrn_n_u32::(a), 0) } @@ -13026,7 +13026,7 @@ pub unsafe fn vqrshrns_n_u32(a: u32) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); let a: uint64x2_t = vdupq_n_u64(a); simd_extract(vqrshrn_n_u64::(a), 0) } @@ -13040,7 +13040,7 @@ pub unsafe fn vqrshrnd_n_u64(a: u64) -> u32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vqrshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -13053,7 +13053,7 @@ pub unsafe fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -13066,7 +13066,7 @@ pub unsafe fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 
32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqrshrn_n_u64::(b), [0, 1, 2, 3]) } @@ -13079,7 +13079,7 @@ pub unsafe fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); let a: int16x8_t = vdupq_n_s16(a); simd_extract(vqrshrun_n_s16::(a), 0) } @@ -13093,7 +13093,7 @@ pub unsafe fn vqrshrunh_n_s16(a: i16) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let a: int32x4_t = vdupq_n_s32(a); simd_extract(vqrshrun_n_s32::(a), 0) } @@ -13107,7 +13107,7 @@ pub unsafe fn vqrshruns_n_s32(a: i32) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); let a: int64x2_t = vdupq_n_s64(a); simd_extract(vqrshrun_n_s64::(a), 0) } @@ -13121,7 +13121,7 @@ pub unsafe fn vqrshrund_n_s64(a: i64) -> u32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vqrshrun_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -13134,7 +13134,7 @@ pub unsafe fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); 
simd_shuffle!(a, vqrshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -13147,7 +13147,7 @@ pub unsafe fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqrshrun_n_s64::(b), [0, 1, 2, 3]) } @@ -13264,7 +13264,7 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(vqshl_n_s8::(vdup_n_s8(a)), 0) } @@ -13277,7 +13277,7 @@ pub unsafe fn vqshlb_n_s8(a: i8) -> i8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_extract(vqshl_n_s16::(vdup_n_s16(a)), 0) } @@ -13290,7 +13290,7 @@ pub unsafe fn vqshlh_n_s16(a: i16) -> i16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshls_n_s32(a: i32) -> i32 { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_extract(vqshl_n_s32::(vdup_n_s32(a)), 0) } @@ -13303,7 +13303,7 @@ pub unsafe fn vqshls_n_s32(a: i32) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_n_s64(a: i64) -> i64 { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_extract(vqshl_n_s64::(vdup_n_s64(a)), 0) } @@ -13316,7 +13316,7 @@ pub unsafe fn vqshld_n_s64(a: i64) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); 
simd_extract(vqshl_n_u8::(vdup_n_u8(a)), 0) } @@ -13329,7 +13329,7 @@ pub unsafe fn vqshlb_n_u8(a: u8) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_extract(vqshl_n_u16::(vdup_n_u16(a)), 0) } @@ -13342,7 +13342,7 @@ pub unsafe fn vqshlh_n_u16(a: u16) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshls_n_u32(a: u32) -> u32 { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_extract(vqshl_n_u32::(vdup_n_u32(a)), 0) } @@ -13355,7 +13355,7 @@ pub unsafe fn vqshls_n_u32(a: u32) -> u32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshld_n_u64(a: u64) -> u64 { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_extract(vqshl_n_u64::(vdup_n_u64(a)), 0) } @@ -13368,7 +13368,7 @@ pub unsafe fn vqshld_n_u64(a: u64) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_extract(vqshlu_n_s8::(vdup_n_s8(a)), 0) } @@ -13381,7 +13381,7 @@ pub unsafe fn vqshlub_n_s8(a: i8) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_extract(vqshlu_n_s16::(vdup_n_s16(a)), 0) } @@ -13394,7 +13394,7 @@ pub unsafe fn vqshluh_n_s16(a: i16) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_extract(vqshlu_n_s32::(vdup_n_s32(a)), 0) } @@ -13407,7 +13407,7 @@ pub unsafe fn vqshlus_n_s32(a: i32) -> u32 { 
#[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_extract(vqshlu_n_s64::(vdup_n_s64(a)), 0) } @@ -13420,7 +13420,7 @@ pub unsafe fn vqshlud_n_s64(a: i64) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.i32")] @@ -13438,7 +13438,7 @@ pub unsafe fn vqshrnd_n_s64(a: i64) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_extract(vqshrn_n_s16::(vdupq_n_s16(a)), 0) } @@ -13451,7 +13451,7 @@ pub unsafe fn vqshrnh_n_s16(a: i16) -> i8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_extract(vqshrn_n_s32::(vdupq_n_s32(a)), 0) } @@ -13464,7 +13464,7 @@ pub unsafe fn vqshrns_n_s32(a: i32) -> i16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vqshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -13477,7 +13477,7 @@ pub unsafe fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: 
int32x4_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -13490,7 +13490,7 @@ pub unsafe fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqshrn_n_s64::(b), [0, 1, 2, 3]) } @@ -13503,7 +13503,7 @@ pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.i32")] @@ -13521,7 +13521,7 @@ pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_extract(vqshrn_n_u16::(vdupq_n_u16(a)), 0) } @@ -13534,7 +13534,7 @@ pub unsafe fn vqshrnh_n_u16(a: u16) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_extract(vqshrn_n_u32::(vdupq_n_u32(a)), 0) } @@ -13547,7 +13547,7 @@ pub unsafe fn vqshrns_n_u32(a: u32) -> u16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + 
static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vqshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -13560,7 +13560,7 @@ pub unsafe fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -13573,7 +13573,7 @@ pub unsafe fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqshrn_n_u64::(b), [0, 1, 2, 3]) } @@ -13586,7 +13586,7 @@ pub unsafe fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_extract(vqshrun_n_s16::(vdupq_n_s16(a)), 0) } @@ -13599,7 +13599,7 @@ pub unsafe fn vqshrunh_n_s16(a: i16) -> u8 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_extract(vqshrun_n_s32::(vdupq_n_s32(a)), 0) } @@ -13612,7 +13612,7 @@ pub unsafe fn vqshruns_n_s32(a: i32) -> u16 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_extract(vqshrun_n_s64::(vdupq_n_s64(a)), 0) } @@ 
-13625,7 +13625,7 @@ pub unsafe fn vqshrund_n_s64(a: i64) -> u32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vqshrun_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -13638,7 +13638,7 @@ pub unsafe fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> ui #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vqshrun_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -13651,7 +13651,7 @@ pub unsafe fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vqshrun_n_s64::(b), [0, 1, 2, 3]) } @@ -14764,7 +14764,7 @@ pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vrshld_s64(a, -N as i64) } @@ -14777,7 +14777,7 @@ pub unsafe fn vrshrd_n_s64(a: i64) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vrshld_u64(a, -N as i64) } @@ -14790,7 +14790,7 @@ pub unsafe fn vrshrd_n_u64(a: u64) -> u64 { #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vrshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -14803,7 +14803,7 @@ pub unsafe fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vrshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -14816,7 +14816,7 @@ pub unsafe fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vrshrn_n_s64::(b), [0, 1, 2, 3]) } @@ -14829,7 +14829,7 @@ pub unsafe fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vrshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -14842,7 +14842,7 @@ pub unsafe fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> ui #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vrshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -14855,7 +14855,7 @@ pub unsafe fn 
vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vrshrn_n_u64::(b), [0, 1, 2, 3]) } @@ -14868,7 +14868,7 @@ pub unsafe fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let b: i64 = vrshrd_n_s64::(b); a.wrapping_add(b) } @@ -14882,7 +14882,7 @@ pub unsafe fn vrsrad_n_s64(a: i64, b: i64) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrsrad_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let b: u64 = vrshrd_n_u64::(b); a.wrapping_add(b) } @@ -14968,7 +14968,7 @@ pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) } @@ -14981,7 +14981,7 @@ pub unsafe fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) } @@ -15016,7 +15016,7 @@ pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vshll_high_n_s8(a: int8x16_t) -> int16x8_t { - static_assert!(N : i32 where N >= 0 && N <= 8); + static_assert!(N >= 0 && N <= 8); let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vshll_n_s8::(b) } @@ -15030,7 +15030,7 @@ pub unsafe fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { - static_assert!(N : i32 where N >= 0 && N <= 16); + static_assert!(N >= 0 && N <= 16); let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); vshll_n_s16::(b) } @@ -15044,7 +15044,7 @@ pub unsafe fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { - static_assert!(N : i32 where N >= 0 && N <= 32); + static_assert!(N >= 0 && N <= 32); let b: int32x2_t = simd_shuffle!(a, a, [2, 3]); vshll_n_s32::(b) } @@ -15058,7 +15058,7 @@ pub unsafe fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 0 && N <= 8); + static_assert!(N >= 0 && N <= 8); let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); vshll_n_u8::(b) } @@ -15072,7 +15072,7 @@ pub unsafe fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 0 && N <= 16); + static_assert!(N >= 0 && N <= 16); let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); vshll_n_u16::(b) } @@ -15086,7 +15086,7 @@ pub unsafe fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 0 && N <= 32); + static_assert!(N >= 0 && N <= 32); let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]); vshll_n_u32::(b) } @@ -15100,7 +15100,7 @@ pub unsafe fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vshrn_n_s16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -15113,7 +15113,7 @@ pub unsafe fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vshrn_n_s32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -15126,7 +15126,7 @@ pub unsafe fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int1 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vshrn_n_s64::(b), [0, 1, 2, 3]) } @@ -15139,7 +15139,7 @@ pub unsafe fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int3 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_shuffle!(a, vshrn_n_u16::(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } @@ -15152,7 +15152,7 @@ pub unsafe fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uin 
#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_shuffle!(a, vshrn_n_u32::(b), [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -15165,7 +15165,7 @@ pub unsafe fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> ui #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_shuffle!(a, vshrn_n_u64::(b), [0, 1, 2, 3]) } @@ -17073,7 +17073,7 @@ pub unsafe fn vqabsd_s64(a: i64) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N : i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vsli_n_s64::(transmute(a), transmute(b))) } @@ -17086,7 +17086,7 @@ pub unsafe fn vslid_n_s64(a: i64, b: i64) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N : i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vsli_n_u64::(transmute(a), transmute(b))) } @@ -17099,7 +17099,7 @@ pub unsafe fn vslid_n_u64(a: u64, b: u64) -> u64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); transmute(vsri_n_s64::(transmute(a), transmute(b))) } @@ -17112,7 +17112,7 @@ pub unsafe fn vsrid_n_s64(a: i64, b: i64) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrid_n_u64(a: u64, b: u64) -> u64 
{ - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); transmute(vsri_n_u64::(transmute(a), transmute(b))) } diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index b28fd33eaf33..850657033774 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -373,8 +373,8 @@ pub unsafe fn vcopy_lane_s64( _a: int64x1_t, b: int64x1_t, ) -> int64x1_t { - static_assert!(N1 : i32 where N1 == 0); - static_assert!(N2 : i32 where N2 == 0); + static_assert!(N1 == 0); + static_assert!(N2 == 0); b } @@ -388,8 +388,8 @@ pub unsafe fn vcopy_lane_u64( _a: uint64x1_t, b: uint64x1_t, ) -> uint64x1_t { - static_assert!(N1 : i32 where N1 == 0); - static_assert!(N2 : i32 where N2 == 0); + static_assert!(N1 == 0); + static_assert!(N2 == 0); b } @@ -403,8 +403,8 @@ pub unsafe fn vcopy_lane_p64( _a: poly64x1_t, b: poly64x1_t, ) -> poly64x1_t { - static_assert!(N1 : i32 where N1 == 0); - static_assert!(N2 : i32 where N2 == 0); + static_assert!(N1 == 0); + static_assert!(N2 == 0); b } @@ -418,8 +418,8 @@ pub unsafe fn vcopy_lane_f64( _a: float64x1_t, b: float64x1_t, ) -> float64x1_t { - static_assert!(N1 : i32 where N1 == 0); - static_assert!(N2 : i32 where N2 == 0); + static_assert!(N1 == 0); + static_assert!(N2 == 0); b } @@ -433,8 +433,8 @@ pub unsafe fn vcopy_laneq_s64( _a: int64x1_t, b: int64x2_t, ) -> int64x1_t { - static_assert!(LANE1 : i32 where LANE1 == 0); - static_assert_imm1!(LANE2); + static_assert!(LANE1 == 0); + static_assert_uimm_bits!(LANE2, 1); transmute::(simd_extract(b, LANE2 as u32)) } @@ -448,8 +448,8 @@ pub unsafe fn vcopy_laneq_u64( _a: uint64x1_t, b: uint64x2_t, ) -> uint64x1_t { - static_assert!(LANE1 : i32 where LANE1 == 0); - static_assert_imm1!(LANE2); + static_assert!(LANE1 == 0); + static_assert_uimm_bits!(LANE2, 1); transmute::(simd_extract(b, LANE2 as u32)) } @@ -463,8 +463,8 
@@ pub unsafe fn vcopy_laneq_p64( _a: poly64x1_t, b: poly64x2_t, ) -> poly64x1_t { - static_assert!(LANE1 : i32 where LANE1 == 0); - static_assert_imm1!(LANE2); + static_assert!(LANE1 == 0); + static_assert_uimm_bits!(LANE2, 1); transmute::(simd_extract(b, LANE2 as u32)) } @@ -478,8 +478,8 @@ pub unsafe fn vcopy_laneq_f64( _a: float64x1_t, b: float64x2_t, ) -> float64x1_t { - static_assert!(LANE1 : i32 where LANE1 == 0); - static_assert_imm1!(LANE2); + static_assert!(LANE1 == 0); + static_assert_uimm_bits!(LANE2, 1); transmute::(simd_extract(b, LANE2 as u32)) } @@ -747,7 +747,7 @@ pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t { #[cfg_attr(test, assert_instr(ldr, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) -> float64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) } @@ -758,7 +758,7 @@ pub unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) #[cfg_attr(test, assert_instr(ld1, LANE = 1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld1q_lane_f64(ptr: *const f64, src: float64x2_t) -> float64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1950,7 +1950,7 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -1961,7 +1961,7 @@ pub unsafe fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_ #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vext_f64(a: float64x1_t, _b: float64x1_t) -> float64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -2080,7 +2080,7 @@ pub 
unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, IMM5 = 0))] pub unsafe fn vget_lane_f64(v: float64x1_t) -> f64 { - static_assert!(IMM5 : i32 where IMM5 == 0); + static_assert!(IMM5 == 0); simd_extract(v, IMM5 as u32) } @@ -2091,7 +2091,7 @@ pub unsafe fn vget_lane_f64(v: float64x1_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, IMM5 = 0))] pub unsafe fn vgetq_lane_f64(v: float64x2_t) -> f64 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -3001,7 +3001,7 @@ pub unsafe fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> pol #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshld_n_s64(a: i64) -> i64 { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); a << N } @@ -3012,7 +3012,7 @@ pub unsafe fn vshld_n_s64(a: i64) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshld_n_u64(a: u64) -> u64 { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); a << N } @@ -3023,7 +3023,7 @@ pub unsafe fn vshld_n_u64(a: u64) -> u64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrd_n_s64(a: i64) -> i64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; a >> n } @@ -3035,7 +3035,7 @@ pub unsafe fn vshrd_n_s64(a: i64) -> i64 { #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vshrd_n_u64(a: u64) -> u64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { return 0; } else { @@ -3051,7 +3051,7 @@ pub unsafe fn 
vshrd_n_u64(a: u64) -> u64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrad_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); a.wrapping_add(vshrd_n_s64::(b)) } @@ -3062,7 +3062,7 @@ pub unsafe fn vsrad_n_s64(a: i64, b: i64) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsrad_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); a.wrapping_add(vshrd_n_u64::(b)) } @@ -3073,7 +3073,7 @@ pub unsafe fn vsrad_n_u64(a: u64, b: u64) -> u64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vsli_n_s8_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3083,7 +3083,7 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vsliq_n_s8_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3093,7 +3093,7 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); vsli_n_s16_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3103,7 +3103,7 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_imm4!(N); + 
static_assert_uimm_bits!(N, 4); vsliq_n_s16_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3113,7 +3113,7 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); vsli_n_s32_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3123,7 +3123,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); vsliq_n_s32_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3133,7 +3133,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N: i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); vsli_n_s64_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3143,7 +3143,7 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N: i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); vsliq_n_s64_(a, b, N) } /// Shift Left and Insert (immediate) @@ -3153,7 +3153,7 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); 
transmute(vsli_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3163,7 +3163,7 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3173,7 +3173,7 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); transmute(vsli_n_s16_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3183,7 +3183,7 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3193,7 +3193,7 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); transmute(vsli_n_s32_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3203,7 +3203,7 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - 
static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); transmute(vsliq_n_s32_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3213,7 +3213,7 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N: i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vsli_n_s64_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3223,7 +3223,7 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N: i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3233,7 +3233,7 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); transmute(vsli_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3243,7 +3243,7 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3253,7 +3253,7 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); transmute(vsli_n_s16_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) @@ -3263,7 +3263,7 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) } @@ -3276,7 +3276,7 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N: i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vsli_n_s64_(transmute(a), transmute(b), N)) } @@ -3289,7 +3289,7 @@ pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N: i32 where N >= 0 && N <= 63); + static_assert!(N >= 0 && N <= 63); transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3299,7 +3299,7 @@ pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N: i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); vsri_n_s8_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3309,7 +3309,7 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N: i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); vsriq_n_s8_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3319,7 +3319,7 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N: i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); vsri_n_s16_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3329,7 +3329,7 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N: i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); vsriq_n_s16_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3339,7 +3339,7 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N: i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); vsri_n_s32_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3349,7 +3349,7 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N: i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); vsriq_n_s32_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3359,7 +3359,7 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N: i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vsri_n_s64_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3369,7 +3369,7 @@ pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N: i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vsriq_n_s64_(a, b, N) } /// Shift Right and Insert (immediate) @@ -3379,7 +3379,7 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N: i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); transmute(vsri_n_s8_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3389,7 +3389,7 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N: i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); transmute(vsriq_n_s8_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3399,7 +3399,7 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N: i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); transmute(vsri_n_s16_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3409,7 +3409,7 @@ pub unsafe fn vsri_n_u16(a: 
uint16x4_t, b: uint16x4_t) -> uint16x4 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N: i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); transmute(vsriq_n_s16_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3419,7 +3419,7 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N: i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); transmute(vsri_n_s32_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3429,7 +3429,7 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N: i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); transmute(vsriq_n_s32_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3439,7 +3439,7 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N: i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); transmute(vsri_n_s64_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3449,7 +3449,7 @@ pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N: i32 where N >= 1 && N <= 64); + 
static_assert!(N >= 1 && N <= 64); transmute(vsriq_n_s64_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3459,7 +3459,7 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(N: i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); transmute(vsri_n_s8_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3469,7 +3469,7 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(N: i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); transmute(vsriq_n_s8_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3479,7 +3479,7 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(N: i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); transmute(vsri_n_s16_(transmute(a), transmute(b), N)) } /// Shift Right and Insert (immediate) @@ -3489,7 +3489,7 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(N: i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); transmute(vsriq_n_s16_(transmute(a), transmute(b), N)) } @@ -3502,7 +3502,7 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N: i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); transmute(vsri_n_s64_(transmute(a), transmute(b), N)) } @@ -3515,7 +3515,7 @@ pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N: i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); transmute(vsriq_n_s64_(transmute(a), transmute(b), N)) } @@ -3529,7 +3529,7 @@ pub unsafe fn vsm3tt1aq_u32( b: uint32x4_t, c: uint32x4_t, ) -> uint32x4_t { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt1a")] @@ -3548,7 +3548,7 @@ pub unsafe fn vsm3tt1bq_u32( b: uint32x4_t, c: uint32x4_t, ) -> uint32x4_t { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt1b")] @@ -3567,7 +3567,7 @@ pub unsafe fn vsm3tt2aq_u32( b: uint32x4_t, c: uint32x4_t, ) -> uint32x4_t { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt2a")] @@ -3586,7 +3586,7 @@ pub unsafe fn vsm3tt2bq_u32( b: uint32x4_t, c: uint32x4_t, ) -> uint32x4_t { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt2b")] @@ -3601,7 +3601,7 @@ pub unsafe fn vsm3tt2bq_u32( #[cfg_attr(test, assert_instr(xar, IMM6 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - 
static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.xar")] diff --git a/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs b/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs index 3ae0ef506b30..0e2e39cc2e2f 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs @@ -67,7 +67,7 @@ pub const _PREFETCH_LOCALITY3: i32 = 3; // FIXME: Replace this with the standard ACLE __pld/__pldx/__pli/__plix intrinsics pub unsafe fn _prefetch(p: *const i8) { // We use the `llvm.prefetch` intrinsic with `cache type` = 1 (data cache). - static_assert_imm1!(RW); - static_assert_imm2!(LOCALITY); + static_assert_uimm_bits!(RW, 1); + static_assert_uimm_bits!(LOCALITY, 2); prefetch(p, RW, LOCALITY, 1); } diff --git a/library/stdarch/crates/core_arch/src/aarch64/tme.rs b/library/stdarch/crates/core_arch/src/aarch64/tme.rs index d1b2cf334d1c..05df313e4cd1 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/tme.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/tme.rs @@ -96,7 +96,7 @@ pub unsafe fn __tcommit() { #[cfg_attr(test, assert_instr(tcancel, IMM16 = 0x0))] #[rustc_legacy_const_generics(0)] pub unsafe fn __tcancel() { - static_assert!(IMM16: u64 where IMM16 <= 65535); + static_assert!(IMM16 <= 65535); aarch64_tcancel(IMM16); } diff --git a/library/stdarch/crates/core_arch/src/arm/armclang.rs b/library/stdarch/crates/core_arch/src/arm/armclang.rs index e68c02d027d5..e44ee2f4ad7c 100644 --- a/library/stdarch/crates/core_arch/src/arm/armclang.rs +++ b/library/stdarch/crates/core_arch/src/arm/armclang.rs @@ -30,6 +30,6 @@ use stdarch_test::assert_instr; #[inline(always)] #[rustc_legacy_const_generics(0)] pub unsafe fn __breakpoint() { - static_assert_imm8!(VAL); + static_assert_uimm_bits!(VAL, 8); crate::arch::asm!("bkpt #{}", const VAL); } diff --git 
a/library/stdarch/crates/core_arch/src/arm/mod.rs b/library/stdarch/crates/core_arch/src/arm/mod.rs index efe0068d405b..ec91e5de54ca 100644 --- a/library/stdarch/crates/core_arch/src/arm/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm/mod.rs @@ -103,7 +103,7 @@ pub unsafe fn udf() -> ! { #[inline(always)] #[rustc_legacy_const_generics(0)] pub unsafe fn __dbg() { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); dbg(IMM4); } diff --git a/library/stdarch/crates/core_arch/src/arm/neon.rs b/library/stdarch/crates/core_arch/src/arm/neon.rs index a6291c95cfe3..e1de48538ea4 100644 --- a/library/stdarch/crates/core_arch/src/arm/neon.rs +++ b/library/stdarch/crates/core_arch/src/arm/neon.rs @@ -821,7 +821,7 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let n = N as i8; vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n)) } @@ -831,7 +831,7 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let n = N as i8; vshiftins_v16i8( a, @@ -845,7 +845,7 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); let n = N as i16; vshiftins_v4i16(a, b, int16x4_t(n, n, n, n)) } @@ -855,7 +855,7 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> 
int16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); let n = N as i16; vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n)) } @@ -865,7 +865,7 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); vshiftins_v2i32(a, b, int32x2_t(N, N)) } /// Shift Left and Insert (immediate) @@ -874,7 +874,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); vshiftins_v4i32(a, b, int32x4_t(N, N, N, N)) } /// Shift Left and Insert (immediate) @@ -883,7 +883,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where 0 <= N && N <= 63); + static_assert!(0 <= N && N <= 63); vshiftins_v1i64(a, b, int64x1_t(N as i64)) } /// Shift Left and Insert (immediate) @@ -892,7 +892,7 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where 0 <= N && N <= 63); + static_assert!(0 <= N && N <= 63); vshiftins_v2i64(a, b, int64x2_t(N as i64, N as i64)) } /// Shift Left and Insert (immediate) @@ -901,7 +901,7 @@ pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u8(a: 
uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let n = N as i8; transmute(vshiftins_v8i8( transmute(a), @@ -915,7 +915,7 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let n = N as i8; transmute(vshiftins_v16i8( transmute(a), @@ -929,7 +929,7 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); let n = N as i16; transmute(vshiftins_v4i16( transmute(a), @@ -943,7 +943,7 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); let n = N as i16; transmute(vshiftins_v8i16( transmute(a), @@ -957,7 +957,7 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && N <= 31); transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(N, N))) } /// Shift Left and Insert (immediate) @@ -966,7 +966,7 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N: i32 where N >= 0 && N <= 31); + static_assert!(N >= 0 && 
N <= 31); transmute(vshiftins_v4i32( transmute(a), transmute(b), @@ -979,7 +979,7 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where 0 <= N && N <= 63); + static_assert!(0 <= N && N <= 63); transmute(vshiftins_v1i64( transmute(a), transmute(b), @@ -992,7 +992,7 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where 0 <= N && N <= 63); + static_assert!(0 <= N && N <= 63); transmute(vshiftins_v2i64( transmute(a), transmute(b), @@ -1005,7 +1005,7 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let n = N as i8; transmute(vshiftins_v8i8( transmute(a), @@ -1019,7 +1019,7 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); let n = N as i8; transmute(vshiftins_v16i8( transmute(a), @@ -1033,7 +1033,7 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); let n = N as i16; transmute(vshiftins_v4i16( transmute(a), @@ -1048,7 +1048,7 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> 
poly16x4 #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); let n = N as i16; transmute(vshiftins_v8i16( transmute(a), @@ -1065,7 +1065,7 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N : i32 where 0 <= N && N <= 63); + static_assert!(0 <= N && N <= 63); transmute(vshiftins_v1i64( transmute(a), transmute(b), @@ -1081,7 +1081,7 @@ pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N : i32 where 0 <= N && N <= 63); + static_assert!(0 <= N && N <= 63); transmute(vshiftins_v2i64( transmute(a), transmute(b), @@ -1094,7 +1094,7 @@ pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N : i32 where 1 <= N && N <= 8); + static_assert!(1 <= N && N <= 8); let n = -N as i8; vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n)) } @@ -1104,7 +1104,7 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N : i32 where 1 <= N && N <= 8); + static_assert!(1 <= N && N <= 8); let n = -N as i8; vshiftins_v16i8( a, @@ -1118,7 +1118,7 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe 
fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N : i32 where 1 <= N && N <= 16); + static_assert!(1 <= N && N <= 16); let n = -N as i16; vshiftins_v4i16(a, b, int16x4_t(n, n, n, n)) } @@ -1128,7 +1128,7 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N : i32 where 1 <= N && N <= 16); + static_assert!(1 <= N && N <= 16); let n = -N as i16; vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n)) } @@ -1138,7 +1138,7 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N : i32 where 1 <= N && N <= 32); + static_assert!(1 <= N && N <= 32); vshiftins_v2i32(a, b, int32x2_t(-N, -N)) } /// Shift Right and Insert (immediate) @@ -1147,7 +1147,7 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N : i32 where 1 <= N && N <= 32); + static_assert!(1 <= N && N <= 32); vshiftins_v4i32(a, b, int32x4_t(-N, -N, -N, -N)) } /// Shift Right and Insert (immediate) @@ -1156,7 +1156,7 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where 1 <= N && N <= 64); + static_assert!(1 <= N && N <= 64); vshiftins_v1i64(a, b, int64x1_t(-N as i64)) } /// Shift Right and Insert (immediate) @@ -1165,7 +1165,7 @@ pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(test, assert_instr("vsri.64", N = 1))] 
#[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where 1 <= N && N <= 64); + static_assert!(1 <= N && N <= 64); vshiftins_v2i64(a, b, int64x2_t(-N as i64, -N as i64)) } /// Shift Right and Insert (immediate) @@ -1174,7 +1174,7 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N : i32 where 1 <= N && N <= 8); + static_assert!(1 <= N && N <= 8); let n = -N as i8; transmute(vshiftins_v8i8( transmute(a), @@ -1188,7 +1188,7 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N : i32 where 1 <= N && N <= 8); + static_assert!(1 <= N && N <= 8); let n = -N as i8; transmute(vshiftins_v16i8( transmute(a), @@ -1202,7 +1202,7 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N : i32 where 1 <= N && N <= 16); + static_assert!(1 <= N && N <= 16); let n = -N as i16; transmute(vshiftins_v4i16( transmute(a), @@ -1216,7 +1216,7 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N : i32 where 1 <= N && N <= 16); + static_assert!(1 <= N && N <= 16); let n = -N as i16; transmute(vshiftins_v8i16( transmute(a), @@ -1230,7 +1230,7 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[cfg_attr(test, assert_instr("vsri.32", N = 1))] 
#[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N : i32 where 1 <= N && N <= 32); + static_assert!(1 <= N && N <= 32); transmute(vshiftins_v2i32( transmute(a), transmute(b), @@ -1243,7 +1243,7 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N : i32 where 1 <= N && N <= 32); + static_assert!(1 <= N && N <= 32); transmute(vshiftins_v4i32( transmute(a), transmute(b), @@ -1256,7 +1256,7 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where 1 <= N && N <= 64); + static_assert!(1 <= N && N <= 64); transmute(vshiftins_v1i64( transmute(a), transmute(b), @@ -1269,7 +1269,7 @@ pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where 1 <= N && N <= 64); + static_assert!(1 <= N && N <= 64); transmute(vshiftins_v2i64( transmute(a), transmute(b), @@ -1282,7 +1282,7 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(N : i32 where 1 <= N && N <= 8); + static_assert!(1 <= N && N <= 8); let n = -N as i8; transmute(vshiftins_v8i8( transmute(a), @@ -1296,7 +1296,7 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_p8(a: 
poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(N : i32 where 1 <= N && N <= 8); + static_assert!(1 <= N && N <= 8); let n = -N as i8; transmute(vshiftins_v16i8( transmute(a), @@ -1310,7 +1310,7 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(N : i32 where 1 <= N && N <= 16); + static_assert!(1 <= N && N <= 16); let n = -N as i16; transmute(vshiftins_v4i16( transmute(a), @@ -1324,7 +1324,7 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(N : i32 where 1 <= N && N <= 16); + static_assert!(1 <= N && N <= 16); let n = -N as i16; transmute(vshiftins_v8i16( transmute(a), @@ -1341,7 +1341,7 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N : i32 where 1 <= N && N <= 64); + static_assert!(1 <= N && N <= 64); transmute(vshiftins_v1i64( transmute(a), transmute(b), @@ -1357,7 +1357,7 @@ pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N : i32 where 1 <= N && N <= 64); + static_assert!(1 <= N && N <= 64); transmute(vshiftins_v2i64( transmute(a), transmute(b), diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 3f016221092f..775811e657c1 100644 --- 
a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -2875,7 +2875,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32")] @@ -2894,7 +2894,7 @@ vcvt_n_f32_s32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32")] @@ -2912,7 +2912,7 @@ vcvt_n_f32_s32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32")] @@ -2931,7 +2931,7 @@ vcvtq_n_f32_s32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32")] @@ -2949,7 +2949,7 @@ vcvtq_n_f32_s32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_f32_u32(a: 
uint32x2_t) -> float32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32")] @@ -2968,7 +2968,7 @@ vcvt_n_f32_u32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32")] @@ -2986,7 +2986,7 @@ vcvt_n_f32_u32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32")] @@ -3005,7 +3005,7 @@ vcvtq_n_f32_u32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32")] @@ -3023,7 +3023,7 @@ vcvtq_n_f32_u32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32")] @@ -3042,7 +3042,7 @@ vcvt_n_s32_f32_(a, N) #[rustc_legacy_const_generics(1)] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32")] @@ -3060,7 +3060,7 @@ vcvt_n_s32_f32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32")] @@ -3079,7 +3079,7 @@ vcvtq_n_s32_f32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32")] @@ -3097,7 +3097,7 @@ vcvtq_n_s32_f32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32")] @@ -3116,7 +3116,7 @@ vcvt_n_u32_f32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32")] @@ -3134,7 +3134,7 @@ vcvt_n_u32_f32_(a, N) #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32")] @@ -3153,7 +3153,7 @@ vcvtq_n_u32_f32_(a, N) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32")] @@ -3249,7 +3249,7 @@ vcvtq_u32_f32_(a) #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3264,7 +3264,7 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3279,7 +3279,7 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vdup_lane_s16(a: int16x4_t) -> int16x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3294,7 +3294,7 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3309,7 +3309,7 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3324,7 +3324,7 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3339,7 +3339,7 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3354,7 +3354,7 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe 
fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3369,7 +3369,7 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3384,7 +3384,7 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3399,7 +3399,7 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3414,7 +3414,7 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3429,7 +3429,7 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3444,7 +3444,7 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3459,7 +3459,7 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3474,7 +3474,7 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3489,7 +3489,7 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N 
as u32, N as u32]) } @@ -3504,7 +3504,7 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3519,7 +3519,7 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3534,7 +3534,7 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3549,7 +3549,7 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3564,7 +3564,7 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N 
as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3579,7 +3579,7 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3594,7 +3594,7 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3609,7 +3609,7 @@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3624,7 +3624,7 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3639,7 +3639,7 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { #[rustc_legacy_const_generics(1)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3654,7 +3654,7 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3669,7 +3669,7 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3684,7 +3684,7 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3699,7 +3699,7 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, 
N as u32, N as u32, N as u32, N as u32]) } @@ -3714,7 +3714,7 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) } @@ -3729,7 +3729,7 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3744,7 +3744,7 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3759,7 +3759,7 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3774,7 +3774,7 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3789,7 +3789,7 @@ pub 
unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3804,7 +3804,7 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3819,7 +3819,7 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) } @@ -3834,7 +3834,7 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -3849,7 +3849,7 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -3864,7 +3864,7 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -3879,7 +3879,7 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); transmute::(simd_extract(a, N as u32)) } @@ -3894,7 +3894,7 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); transmute::(simd_extract(a, N as u32)) } @@ -3909,7 +3909,7 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), @@ -3934,7 +3934,7 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), @@ -3967,7 +3967,7 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), @@ -3988,7 +3988,7 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), @@ -4013,7 +4013,7 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), @@ -4032,7 +4032,7 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), @@ -4053,7 +4053,7 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + 
static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), @@ -4078,7 +4078,7 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), @@ -4111,7 +4111,7 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), @@ -4132,7 +4132,7 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), @@ -4157,7 +4157,7 @@ pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => 
simd_shuffle!(a, b, [1, 2]), @@ -4176,7 +4176,7 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), @@ -4197,7 +4197,7 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), @@ -4222,7 +4222,7 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), @@ -4255,7 +4255,7 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), @@ -4276,7 +4276,7 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), @@ -4301,7 +4301,7 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), @@ -4320,7 +4320,7 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), @@ -4339,7 +4339,7 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), @@ -4358,7 +4358,7 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_imm2!(N); + 
static_assert_uimm_bits!(N, 2); match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), @@ -4691,7 +4691,7 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmla_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4706,7 +4706,7 @@ pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmla_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4721,7 +4721,7 @@ pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlaq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4736,7 +4736,7 @@ pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlaq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, 
LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4751,7 +4751,7 @@ pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -4766,7 +4766,7 @@ pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -4781,7 +4781,7 @@ pub unsafe fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlaq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4796,7 +4796,7 @@ pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlaq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4811,7 +4811,7 @@ pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[rustc_legacy_const_generics(3)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmla_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4826,7 +4826,7 @@ pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmla_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4841,7 +4841,7 @@ pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlaq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4856,7 +4856,7 @@ pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlaq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4871,7 +4871,7 @@ pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -4886,7 +4886,7 @@ pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -4901,7 +4901,7 @@ pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlaq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4916,7 +4916,7 @@ pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlaq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4931,7 +4931,7 @@ pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + 
static_assert_uimm_bits!(LANE, 1); vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -4946,7 +4946,7 @@ pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -4961,7 +4961,7 @@ pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlaq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -4976,7 +4976,7 @@ pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlaq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5121,7 +5121,7 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5136,7 +5136,7 @@ pub unsafe fn vmlal_lane_s16(a: 
int32x4_t, b: int16x4_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlal_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5151,7 +5151,7 @@ pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5166,7 +5166,7 @@ pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5181,7 +5181,7 @@ pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5196,7 +5196,7 @@ pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: 
uint16x4_t, c: uint16x8_t) -> uint32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlal_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5211,7 +5211,7 @@ pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5226,7 +5226,7 @@ pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5553,7 +5553,7 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmls_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5568,7 +5568,7 @@ pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmls_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as 
u32])) } @@ -5583,7 +5583,7 @@ pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5598,7 +5598,7 @@ pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlsq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5613,7 +5613,7 @@ pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5628,7 +5628,7 @@ pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5643,7 +5643,7 @@ pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int 
#[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlsq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5658,7 +5658,7 @@ pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5673,7 +5673,7 @@ pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmls_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5688,7 +5688,7 @@ pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmls_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5703,7 +5703,7 @@ pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u16(a: 
uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5718,7 +5718,7 @@ pub unsafe fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlsq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5733,7 +5733,7 @@ pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5748,7 +5748,7 @@ pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5763,7 +5763,7 @@ pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { - static_assert_imm1!(LANE); + 
static_assert_uimm_bits!(LANE, 1); vmlsq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5778,7 +5778,7 @@ pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5793,7 +5793,7 @@ pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5808,7 +5808,7 @@ pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -5823,7 +5823,7 @@ pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlsq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5838,7 +5838,7 @@ pub unsafe fn vmlsq_lane_f32(a: float32x4_t, 
b: float32x4_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5983,7 +5983,7 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -5998,7 +5998,7 @@ pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlsl_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -6013,7 +6013,7 @@ pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -6028,7 +6028,7 @@ pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -6043,7 +6043,7 @@ pub unsafe fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -6058,7 +6058,7 @@ pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmlsl_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -6073,7 +6073,7 @@ pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) } @@ -6088,7 +6088,7 @@ pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as 
u32])) } @@ -9443,7 +9443,7 @@ vld2q_dup_f32_(a as _) #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")] @@ -9462,7 +9462,7 @@ vld2_lane_s8_(a as _, b.0, b.1, LANE, 1) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8")] @@ -9480,7 +9480,7 @@ vld2_lane_s8_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")] @@ -9499,7 +9499,7 @@ vld2_lane_s16_(a as _, b.0, b.1, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8")] @@ -9517,7 +9517,7 @@ vld2_lane_s16_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); 
#[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")] @@ -9536,7 +9536,7 @@ vld2_lane_s32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8")] @@ -9554,7 +9554,7 @@ vld2_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")] @@ -9573,7 +9573,7 @@ vld2q_lane_s16_(a as _, b.0, b.1, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8")] @@ -9591,7 +9591,7 @@ vld2q_lane_s16_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")] @@ -9610,7 +9610,7 @@ vld2q_lane_s32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8")] @@ -9630,7 +9630,7 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld2_lane_s8::(transmute(a), transmute(b))) } @@ -9645,7 +9645,7 @@ pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uin #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld2_lane_s16::(transmute(a), transmute(b))) } @@ -9660,7 +9660,7 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld2_lane_s32::(transmute(a), transmute(b))) } @@ -9675,7 +9675,7 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } @@ -9690,7 +9690,7 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: 
uint16x8x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld2q_lane_s32::(transmute(a), transmute(b))) } @@ -9705,7 +9705,7 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld2_lane_s8::(transmute(a), transmute(b))) } @@ -9720,7 +9720,7 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld2_lane_s16::(transmute(a), transmute(b))) } @@ -9735,7 +9735,7 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } @@ -9748,7 +9748,7 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld2lane.v2f32.p0i8")] @@ -9767,7 +9767,7 @@ vld2_lane_f32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8")] @@ -9785,7 +9785,7 @@ vld2_lane_f32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")] @@ -9804,7 +9804,7 @@ vld2q_lane_f32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8")] @@ -10728,7 +10728,7 @@ vld3q_dup_f32_(a as _) #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")] @@ -10747,7 +10747,7 @@ vld3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - 
static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8")] @@ -10765,7 +10765,7 @@ vld3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")] @@ -10784,7 +10784,7 @@ vld3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8")] @@ -10802,7 +10802,7 @@ vld3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")] @@ -10821,7 +10821,7 @@ vld3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8")] @@ -10839,7 +10839,7 @@ vld3_lane_s32_(b.0, b.1, b.2, LANE 
as i64, a as _) #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")] @@ -10858,7 +10858,7 @@ vld3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8")] @@ -10876,7 +10876,7 @@ vld3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")] @@ -10895,7 +10895,7 @@ vld3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8")] @@ -10915,7 +10915,7 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { - static_assert_imm3!(LANE); + 
static_assert_uimm_bits!(LANE, 3); transmute(vld3_lane_s8::(transmute(a), transmute(b))) } @@ -10930,7 +10930,7 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld3_lane_s16::(transmute(a), transmute(b))) } @@ -10945,7 +10945,7 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld3_lane_s32::(transmute(a), transmute(b))) } @@ -10960,7 +10960,7 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } @@ -10975,7 +10975,7 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld3q_lane_s32::(transmute(a), transmute(b))) } @@ -10990,7 +10990,7 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld3_lane_s8::(transmute(a), transmute(b))) } @@ -11005,7 +11005,7 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld3_lane_s16::(transmute(a), transmute(b))) } @@ -11020,7 +11020,7 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) } @@ -11033,7 +11033,7 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")] @@ -11052,7 +11052,7 @@ vld3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8")] @@ -11070,7 +11070,7 @@ vld3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, 
assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")] @@ -11089,7 +11089,7 @@ vld3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8")] @@ -12013,7 +12013,7 @@ vld4q_dup_f32_(a as _) #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")] @@ -12032,7 +12032,7 @@ vld4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8")] @@ -12050,7 +12050,7 @@ vld4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch 
= "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")] @@ -12069,7 +12069,7 @@ vld4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8")] @@ -12087,7 +12087,7 @@ vld4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")] @@ -12106,7 +12106,7 @@ vld4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8")] @@ -12124,7 +12124,7 @@ vld4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")] @@ -12143,7 +12143,7 @@ vld4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe 
fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8")] @@ -12161,7 +12161,7 @@ vld4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")] @@ -12180,7 +12180,7 @@ vld4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8")] @@ -12200,7 +12200,7 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld4_lane_s8::(transmute(a), transmute(b))) } @@ -12215,7 +12215,7 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld4_lane_s16::(transmute(a), transmute(b))) } @@ -12230,7 +12230,7 @@ 
pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vld4_lane_s32::(transmute(a), transmute(b))) } @@ -12245,7 +12245,7 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } @@ -12260,7 +12260,7 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vld4q_lane_s32::(transmute(a), transmute(b))) } @@ -12275,7 +12275,7 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld4_lane_s8::(transmute(a), transmute(b))) } @@ -12290,7 +12290,7 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); 
transmute(vld4_lane_s16::(transmute(a), transmute(b))) } @@ -12305,7 +12305,7 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } @@ -12318,7 +12318,7 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")] @@ -12337,7 +12337,7 @@ vld4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8")] @@ -12355,7 +12355,7 @@ vld4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")] @@ -12374,7 +12374,7 @@ vld4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8")] @@ -12394,7 +12394,7 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); } @@ -12409,7 +12409,7 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); } @@ -12424,7 +12424,7 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -12439,7 +12439,7 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); } @@ -12454,7 +12454,7 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s8(a: *mut i8, b: 
int8x16_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); *a = simd_extract(b, LANE as u32); } @@ -12469,7 +12469,7 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); } @@ -12484,7 +12484,7 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); } @@ -12499,7 +12499,7 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -12514,7 +12514,7 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); } @@ -12529,7 +12529,7 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); } @@ -12544,7 +12544,7 @@ pub unsafe fn 
vst1_lane_u16(a: *mut u16, b: uint16x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -12559,7 +12559,7 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); } @@ -12574,7 +12574,7 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); *a = simd_extract(b, LANE as u32); } @@ -12589,7 +12589,7 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); } @@ -12604,7 +12604,7 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); } @@ -12619,7 +12619,7 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -12634,7 +12634,7 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); } @@ -12649,7 +12649,7 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); } @@ -12664,7 +12664,7 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); *a = simd_extract(b, LANE as u32); } @@ -12679,7 +12679,7 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); } @@ -12694,7 +12694,7 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { - static_assert!(LANE : i32 where LANE == 0); + 
static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); } @@ -12709,7 +12709,7 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -12724,7 +12724,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); } @@ -12739,7 +12739,7 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); } @@ -14741,7 +14741,7 @@ vst2q_f32_(b.0, b.1, a as _) #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")] @@ -14760,7 +14760,7 @@ vst2_lane_s8_(a as _, b.0, b.1, LANE, 1) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8")] @@ -14778,7 
+14778,7 @@ vst2_lane_s8_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")] @@ -14797,7 +14797,7 @@ vst2_lane_s16_(a as _, b.0, b.1, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8")] @@ -14815,7 +14815,7 @@ vst2_lane_s16_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")] @@ -14834,7 +14834,7 @@ vst2_lane_s32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8")] @@ -14852,7 +14852,7 @@ vst2_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", 
link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")] @@ -14871,7 +14871,7 @@ vst2q_lane_s16_(a as _, b.0, b.1, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8")] @@ -14889,7 +14889,7 @@ vst2q_lane_s16_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")] @@ -14908,7 +14908,7 @@ vst2q_lane_s32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8")] @@ -14928,7 +14928,7 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst2_lane_s8::(transmute(a), transmute(b))) } @@ -14943,7 +14943,7 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { - static_assert_imm2!(LANE); + 
static_assert_uimm_bits!(LANE, 2); transmute(vst2_lane_s16::(transmute(a), transmute(b))) } @@ -14958,7 +14958,7 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst2_lane_s32::(transmute(a), transmute(b))) } @@ -14973,7 +14973,7 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst2q_lane_s16::(transmute(a), transmute(b))) } @@ -14988,7 +14988,7 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst2q_lane_s32::(transmute(a), transmute(b))) } @@ -15003,7 +15003,7 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst2_lane_s8::(transmute(a), transmute(b))) } @@ -15018,7 +15018,7 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 
2); transmute(vst2_lane_s16::(transmute(a), transmute(b))) } @@ -15033,7 +15033,7 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst2q_lane_s16::(transmute(a), transmute(b))) } @@ -15046,7 +15046,7 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")] @@ -15065,7 +15065,7 @@ vst2_lane_f32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8")] @@ -15083,7 +15083,7 @@ vst2_lane_f32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")] @@ -15102,7 +15102,7 @@ vst2q_lane_f32_(a as _, b.0, b.1, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); 
#[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8")] @@ -15573,7 +15573,7 @@ vst3q_f32_(b.0, b.1, b.2, a as _) #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")] @@ -15592,7 +15592,7 @@ vst3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8")] @@ -15610,7 +15610,7 @@ vst3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")] @@ -15629,7 +15629,7 @@ vst3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8")] @@ -15647,7 +15647,7 @@ vst3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - 
static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")] @@ -15666,7 +15666,7 @@ vst3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8")] @@ -15684,7 +15684,7 @@ vst3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")] @@ -15703,7 +15703,7 @@ vst3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8")] @@ -15721,7 +15721,7 @@ vst3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")] @@ -15740,7 +15740,7 @@ vst3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8")] @@ -15760,7 +15760,7 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst3_lane_s8::(transmute(a), transmute(b))) } @@ -15775,7 +15775,7 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst3_lane_s16::(transmute(a), transmute(b))) } @@ -15790,7 +15790,7 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst3_lane_s32::(transmute(a), transmute(b))) } @@ -15805,7 +15805,7 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) } @@ -15820,7 +15820,7 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { #[rustc_legacy_const_generics(2)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst3q_lane_s32::(transmute(a), transmute(b))) } @@ -15835,7 +15835,7 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst3_lane_s8::(transmute(a), transmute(b))) } @@ -15850,7 +15850,7 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst3_lane_s16::(transmute(a), transmute(b))) } @@ -15865,7 +15865,7 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) } @@ -15878,7 +15878,7 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")] @@ -15897,7 +15897,7 @@ vst3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8")] @@ -15915,7 +15915,7 @@ vst3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")] @@ -15934,7 +15934,7 @@ vst3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8")] @@ -16405,7 +16405,7 @@ vst4q_f32_(b.0, b.1, b.2, b.3, a as _) #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")] @@ -16424,7 +16424,7 @@ vst4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8")] @@ -16442,7 
+16442,7 @@ vst4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")] @@ -16461,7 +16461,7 @@ vst4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8")] @@ -16479,7 +16479,7 @@ vst4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")] @@ -16498,7 +16498,7 @@ vst4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8")] @@ -16516,7 +16516,7 @@ vst4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern 
"unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")] @@ -16535,7 +16535,7 @@ vst4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8")] @@ -16553,7 +16553,7 @@ vst4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")] @@ -16572,7 +16572,7 @@ vst4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8")] @@ -16592,7 +16592,7 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst4_lane_s8::(transmute(a), transmute(b))) } @@ -16607,7 +16607,7 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst4_lane_s16::(transmute(a), transmute(b))) } @@ -16622,7 +16622,7 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); transmute(vst4_lane_s32::(transmute(a), transmute(b))) } @@ -16637,7 +16637,7 @@ pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst4q_lane_s16::(transmute(a), transmute(b))) } @@ -16652,7 +16652,7 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst4q_lane_s32::(transmute(a), transmute(b))) } @@ -16667,7 +16667,7 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst4_lane_s8::(transmute(a), transmute(b))) } @@ -16682,7 +16682,7 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_p16(a: *mut p16, b: 
poly16x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); transmute(vst4_lane_s16::(transmute(a), transmute(b))) } @@ -16697,7 +16697,7 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); transmute(vst4q_lane_s16::(transmute(a), transmute(b))) } @@ -16710,7 +16710,7 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")] @@ -16729,7 +16729,7 @@ vst4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8")] @@ -16747,7 +16747,7 @@ vst4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")] @@ -16766,7 +16766,7 @@ vst4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4) #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn 
vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8")] @@ -17136,7 +17136,7 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17151,7 +17151,7 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17166,7 +17166,7 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17181,7 +17181,7 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_mul(a, 
simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17196,7 +17196,7 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17211,7 +17211,7 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17226,7 +17226,7 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17241,7 +17241,7 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17256,7 +17256,7 @@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17271,7 +17271,7 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17286,7 +17286,7 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17301,7 +17301,7 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17316,7 +17316,7 @@ pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: 
uint32x2_t) -> uint32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17331,7 +17331,7 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17346,7 +17346,7 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17361,7 +17361,7 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17376,7 +17376,7 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17391,7 +17391,7 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17406,7 +17406,7 @@ pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17421,7 +17421,7 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17621,7 +17621,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17636,7 +17636,7 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { - static_assert_imm3!(LANE); + 
static_assert_uimm_bits!(LANE, 3); vmull_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17651,7 +17651,7 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17666,7 +17666,7 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17681,7 +17681,7 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17696,7 +17696,7 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vmull_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) } @@ -17711,7 +17711,7 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> #[rustc_legacy_const_generics(2)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -17726,7 +17726,7 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) } @@ -19439,7 +19439,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); vqdmull_s16(a, b) } @@ -19455,7 +19455,7 @@ pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int3 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); vqdmull_s32(a, b) } @@ -19523,7 +19523,7 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqaddq_s32(a, 
vqdmull_lane_s16::(b, c)) } @@ -19538,7 +19538,7 @@ pub unsafe fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int1 #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); vqaddq_s64(a, vqdmull_lane_s32::(b, c)) } @@ -19605,7 +19605,7 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_imm2!(N); + static_assert_uimm_bits!(N, 2); vqsubq_s32(a, vqdmull_lane_s16::(b, c)) } @@ -19620,7 +19620,7 @@ pub unsafe fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int1 #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_imm1!(N); + static_assert_uimm_bits!(N, 1); vqsubq_s64(a, vqdmull_lane_s32::(b, c)) } @@ -19767,7 +19767,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32))) } @@ -19782,7 +19782,7 @@ pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - 
static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32))) } @@ -19797,7 +19797,7 @@ pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32))) } @@ -19812,7 +19812,7 @@ pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32))) } @@ -20126,7 +20126,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulh_s16(a, b) } @@ -20142,7 +20142,7 @@ pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulh_s16(a, b) } @@ -20158,7 +20158,7 @@ pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let b: int16x8_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulhq_s16(a, b) } @@ -20174,7 +20174,7 @@ pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); let b: int16x8_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulhq_s16(a, b) } @@ -20190,7 +20190,7 @@ pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) - #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); vqrdmulh_s32(a, b) } @@ -20206,7 +20206,7 @@ pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); vqrdmulh_s32(a, b) } @@ -20222,7 +20222,7 @@ pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> #[rustc_legacy_const_generics(2)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulhq_s32(a, b) } @@ -20238,7 +20238,7 @@ pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); vqrdmulhq_s32(a, b) } @@ -20556,7 +20556,7 @@ vqrshlq_u64_(a, b) #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] @@ -20575,7 +20575,7 @@ vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1 #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v8i8")] @@ -20593,7 +20593,7 @@ vqrshrn_n_s16_(a, N) #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern 
"unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] @@ -20612,7 +20612,7 @@ vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v4i16")] @@ -20630,7 +20630,7 @@ vqrshrn_n_s32_(a, N) #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] @@ -20649,7 +20649,7 @@ vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v2i32")] @@ -20667,7 +20667,7 @@ vqrshrn_n_s64_(a, N) #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] @@ -20686,7 +20686,7 @@ vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub 
unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v8i8")] @@ -20704,7 +20704,7 @@ vqrshrn_n_u16_(a, N) #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] @@ -20723,7 +20723,7 @@ vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v4i16")] @@ -20741,7 +20741,7 @@ vqrshrn_n_u32_(a, N) #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] @@ -20760,7 +20760,7 @@ vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v2i32")] @@ -20778,7 
+20778,7 @@ vqrshrn_n_u64_(a, N) #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] @@ -20797,7 +20797,7 @@ vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v8i8")] @@ -20815,7 +20815,7 @@ vqrshrun_n_s16_(a, N) #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] @@ -20834,7 +20834,7 @@ vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v4i16")] @@ -20852,7 +20852,7 @@ vqrshrun_n_s32_(a, N) #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N 
<= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] @@ -20871,7 +20871,7 @@ vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v2i32")] @@ -21195,7 +21195,7 @@ vqshlq_u64_(a, b) #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqshl_s8(a, vdup_n_s8(N as _)) } @@ -21210,7 +21210,7 @@ pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqshlq_s8(a, vdupq_n_s8(N as _)) } @@ -21225,7 +21225,7 @@ pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); vqshl_s16(a, vdup_n_s16(N as _)) } @@ -21240,7 +21240,7 @@ pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); vqshlq_s16(a, vdupq_n_s16(N as _)) } @@ -21255,7 +21255,7 @@ pub unsafe 
fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); vqshl_s32(a, vdup_n_s32(N as _)) } @@ -21270,7 +21270,7 @@ pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); vqshlq_s32(a, vdupq_n_s32(N as _)) } @@ -21285,7 +21285,7 @@ pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); vqshl_s64(a, vdup_n_s64(N as _)) } @@ -21300,7 +21300,7 @@ pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); vqshlq_s64(a, vdupq_n_s64(N as _)) } @@ -21315,7 +21315,7 @@ pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqshl_u8(a, vdup_n_s8(N as _)) } @@ -21330,7 +21330,7 @@ pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> 
uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); vqshlq_u8(a, vdupq_n_s8(N as _)) } @@ -21345,7 +21345,7 @@ pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); vqshl_u16(a, vdup_n_s16(N as _)) } @@ -21360,7 +21360,7 @@ pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); vqshlq_u16(a, vdupq_n_s16(N as _)) } @@ -21375,7 +21375,7 @@ pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); vqshl_u32(a, vdup_n_s32(N as _)) } @@ -21390,7 +21390,7 @@ pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); vqshlq_u32(a, vdupq_n_s32(N as _)) } @@ -21405,7 +21405,7 @@ pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); vqshl_u64(a, vdup_n_s64(N as _)) } @@ -21420,7 +21420,7 @@ pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { 
#[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); vqshlq_u64(a, vdupq_n_s64(N as _)) } @@ -21433,7 +21433,7 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] @@ -21452,7 +21452,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i8")] @@ -21470,7 +21470,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] @@ -21489,7 +21489,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i16")] @@ -21507,7 
+21507,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16)) #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] @@ -21526,7 +21526,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i32")] @@ -21544,7 +21544,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32)) #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] @@ -21563,7 +21563,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v1i64")] @@ -21581,7 +21581,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64)) #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] 
@@ -21600,7 +21600,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v16i8")] @@ -21618,7 +21618,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] @@ -21637,7 +21637,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i16")] @@ -21655,7 +21655,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] @@ -21674,7 +21674,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - 
static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i32")] @@ -21692,7 +21692,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32)) #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] @@ -21711,7 +21711,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i64")] @@ -21729,7 +21729,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64)) #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] @@ -21748,7 +21748,7 @@ vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16 #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v8i8")] @@ -21766,7 +21766,7 @@ vqshrn_n_s16_(a, N) #[cfg_attr(test, assert_instr(vqshrn, N = 2))] 
#[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] @@ -21785,7 +21785,7 @@ vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v4i16")] @@ -21803,7 +21803,7 @@ vqshrn_n_s32_(a, N) #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] @@ -21822,7 +21822,7 @@ vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v2i32")] @@ -21840,7 +21840,7 @@ vqshrn_n_s64_(a, N) #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] @@ 
-21859,7 +21859,7 @@ vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u1 #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v8i8")] @@ -21877,7 +21877,7 @@ vqshrn_n_u16_(a, N) #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] @@ -21896,7 +21896,7 @@ vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v4i16")] @@ -21914,7 +21914,7 @@ vqshrn_n_u32_(a, N) #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] @@ -21933,7 +21933,7 @@ vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); 
+ static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v2i32")] @@ -21951,7 +21951,7 @@ vqshrn_n_u64_(a, N) #[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] @@ -21970,7 +21970,7 @@ vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1 #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v8i8")] @@ -21988,7 +21988,7 @@ vqshrun_n_s16_(a, N) #[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] @@ -22007,7 +22007,7 @@ vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v4i16")] @@ -22025,7 +22025,7 @@ vqshrun_n_s32_(a, N) #[cfg_attr(test, assert_instr(vqshrun, N = 2))] 
#[rustc_legacy_const_generics(1)] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] @@ -22044,7 +22044,7 @@ vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v2i32")] @@ -26184,7 +26184,7 @@ vrshlq_u64_(a, b) #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); vrshl_s8(a, vdup_n_s8((-N) as _)) } @@ -26199,7 +26199,7 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); vrshlq_s8(a, vdupq_n_s8((-N) as _)) } @@ -26214,7 +26214,7 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); vrshl_s16(a, vdup_n_s16((-N) as _)) } @@ -26229,7 +26229,7 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); vrshlq_s16(a, vdupq_n_s16((-N) as _)) } @@ -26244,7 +26244,7 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); vrshl_s32(a, vdup_n_s32((-N) as _)) } @@ -26259,7 +26259,7 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); vrshlq_s32(a, vdupq_n_s32((-N) as _)) } @@ -26274,7 +26274,7 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vrshl_s64(a, vdup_n_s64((-N) as _)) } @@ -26289,7 +26289,7 @@ pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vrshlq_s64(a, vdupq_n_s64((-N) as _)) } @@ -26304,7 +26304,7 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature 
= "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); vrshl_u8(a, vdup_n_s8((-N) as _)) } @@ -26319,7 +26319,7 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); vrshlq_u8(a, vdupq_n_s8((-N) as _)) } @@ -26334,7 +26334,7 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); vrshl_u16(a, vdup_n_s16((-N) as _)) } @@ -26349,7 +26349,7 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); vrshlq_u16(a, vdupq_n_s16((-N) as _)) } @@ -26364,7 +26364,7 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); vrshl_u32(a, vdup_n_s32((-N) as _)) } @@ -26379,7 +26379,7 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe 
fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); vrshlq_u32(a, vdupq_n_s32((-N) as _)) } @@ -26394,7 +26394,7 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vrshl_u64(a, vdup_n_s64((-N) as _)) } @@ -26409,7 +26409,7 @@ pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); vrshlq_u64(a, vdupq_n_s64((-N) as _)) } @@ -26422,7 +26422,7 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] @@ -26441,7 +26441,7 @@ vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16 #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v8i8")] @@ -26459,7 +26459,7 @@ vrshrn_n_s16_(a, N) #[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn 
vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] @@ -26478,7 +26478,7 @@ vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v4i16")] @@ -26496,7 +26496,7 @@ vrshrn_n_s32_(a, N) #[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] @@ -26515,7 +26515,7 @@ vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64)) #[rustc_legacy_const_generics(1)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v2i32")] @@ -26535,7 +26535,7 @@ vrshrn_n_s64_(a, N) #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); transmute(vrshrn_n_s16::(transmute(a))) } @@ -26550,7 +26550,7 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { 
#[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); transmute(vrshrn_n_s32::(transmute(a))) } @@ -26565,7 +26565,7 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); transmute(vrshrn_n_s64::(transmute(a))) } @@ -26580,7 +26580,7 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vrshr_n_s8::(b)) } @@ -26595,7 +26595,7 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vrshrq_n_s8::(b)) } @@ -26610,7 +26610,7 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vrshr_n_s16::(b)) } @@ -26625,7 +26625,7 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> 
int16x4_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vrshrq_n_s16::(b)) } @@ -26640,7 +26640,7 @@ pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vrshr_n_s32::(b)) } @@ -26655,7 +26655,7 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vrshrq_n_s32::(b)) } @@ -26670,7 +26670,7 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vrshr_n_s64::(b)) } @@ -26685,7 +26685,7 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vrshrq_n_s64::(b)) } @@ -26700,7 +26700,7 @@ pub unsafe fn 
vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vrshr_n_u8::(b)) } @@ -26715,7 +26715,7 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vrshrq_n_u8::(b)) } @@ -26730,7 +26730,7 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vrshr_n_u16::(b)) } @@ -26745,7 +26745,7 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vrshrq_n_u16::(b)) } @@ -26760,7 +26760,7 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, 
vrshr_n_u32::(b)) } @@ -26775,7 +26775,7 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vrshrq_n_u32::(b)) } @@ -26790,7 +26790,7 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vrshr_n_u64::(b)) } @@ -26805,7 +26805,7 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vrshrq_n_u64::(b)) } @@ -26916,7 +26916,7 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) } @@ -26931,7 +26931,7 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE 
as u32, a) } @@ -26946,7 +26946,7 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) } @@ -26961,7 +26961,7 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) } @@ -26976,7 +26976,7 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) } @@ -26991,7 +26991,7 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) } @@ -27006,7 +27006,7 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) } @@ -27021,7 +27021,7 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> 
uint32x2_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) } @@ -27036,7 +27036,7 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) } @@ -27051,7 +27051,7 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) } @@ -27066,7 +27066,7 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) } @@ -27081,7 +27081,7 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); simd_insert(b, LANE as u32, a) } @@ -27096,7 +27096,7 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) } @@ -27111,7 +27111,7 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) } @@ -27126,7 +27126,7 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) } @@ -27141,7 +27141,7 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); simd_insert(b, LANE as u32, a) } @@ -27156,7 +27156,7 @@ pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) } @@ -27171,7 +27171,7 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u32(a: u32, b: 
uint32x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) } @@ -27186,7 +27186,7 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) } @@ -27201,7 +27201,7 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); simd_insert(b, LANE as u32, a) } @@ -27216,7 +27216,7 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) } @@ -27231,7 +27231,7 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) } @@ -27246,7 +27246,7 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 
1); simd_insert(b, LANE as u32, a) } @@ -27261,7 +27261,7 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) } @@ -27580,7 +27580,7 @@ vshlq_u64_(a, b) #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shl(a, vdup_n_s8(N as _)) } @@ -27595,7 +27595,7 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shl(a, vdupq_n_s8(N as _)) } @@ -27610,7 +27610,7 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shl(a, vdup_n_s16(N as _)) } @@ -27625,7 +27625,7 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shl(a, vdupq_n_s16(N as _)) } @@ -27640,7 +27640,7 @@ pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_shl(a, vdup_n_s32(N as _)) } @@ -27655,7 +27655,7 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_shl(a, vdupq_n_s32(N as _)) } @@ -27670,7 +27670,7 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shl(a, vdup_n_u8(N as _)) } @@ -27685,7 +27685,7 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert_imm3!(N); + static_assert_uimm_bits!(N, 3); simd_shl(a, vdupq_n_u8(N as _)) } @@ -27700,7 +27700,7 @@ pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shl(a, vdup_n_u16(N as _)) } @@ -27715,7 +27715,7 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert_imm4!(N); + static_assert_uimm_bits!(N, 4); simd_shl(a, vdupq_n_u16(N as _)) } @@ -27730,7 +27730,7 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> 
uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_shl(a, vdup_n_u32(N as _)) } @@ -27745,7 +27745,7 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert_imm5!(N); + static_assert_uimm_bits!(N, 5); simd_shl(a, vdupq_n_u32(N as _)) } @@ -27760,7 +27760,7 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_shl(a, vdup_n_s64(N as _)) } @@ -27775,7 +27775,7 @@ pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_shl(a, vdupq_n_s64(N as _)) } @@ -27790,7 +27790,7 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_shl(a, vdup_n_u64(N as _)) } @@ -27805,7 +27805,7 @@ pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { - 
static_assert_imm6!(N); + static_assert_uimm_bits!(N, 6); simd_shl(a, vdupq_n_u64(N as _)) } @@ -27820,7 +27820,7 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { - static_assert!(N : i32 where N >= 0 && N <= 8); + static_assert!(N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_s16(N as _)) } @@ -27835,7 +27835,7 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 0 && N <= 16); + static_assert!(N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_s32(N as _)) } @@ -27850,7 +27850,7 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { - static_assert!(N : i32 where N >= 0 && N <= 32); + static_assert!(N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_s64(N as _)) } @@ -27865,7 +27865,7 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 0 && N <= 8); + static_assert!(N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_u16(N as _)) } @@ -27880,7 +27880,7 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 0 && N <= 16); + static_assert!(N 
>= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_u32(N as _)) } @@ -27895,7 +27895,7 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 0 && N <= 32); + static_assert!(N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_u64(N as _)) } @@ -27910,7 +27910,7 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; simd_shr(a, vdup_n_s8(n as _)) } @@ -27926,7 +27926,7 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; simd_shr(a, vdupq_n_s8(n as _)) } @@ -27942,7 +27942,7 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; simd_shr(a, vdup_n_s16(n as _)) } @@ -27958,7 +27958,7 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { - 
static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; simd_shr(a, vdupq_n_s16(n as _)) } @@ -27974,7 +27974,7 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; simd_shr(a, vdup_n_s32(n as _)) } @@ -27990,7 +27990,7 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; simd_shr(a, vdupq_n_s32(n as _)) } @@ -28006,7 +28006,7 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; simd_shr(a, vdup_n_s64(n as _)) } @@ -28022,7 +28022,7 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; simd_shr(a, vdupq_n_s64(n as _)) } @@ -28038,7 +28038,7 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { #[rustc_legacy_const_generics(1)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N }; simd_shr(a, vdup_n_u8(n as _)) } @@ -28054,7 +28054,7 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N }; simd_shr(a, vdupq_n_u8(n as _)) } @@ -28070,7 +28070,7 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N }; simd_shr(a, vdup_n_u16(n as _)) } @@ -28086,7 +28086,7 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N }; simd_shr(a, vdupq_n_u16(n as _)) } @@ -28102,7 +28102,7 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 
1 && N <= 32); let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N }; simd_shr(a, vdup_n_u32(n as _)) } @@ -28118,7 +28118,7 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N }; simd_shr(a, vdupq_n_u32(n as _)) } @@ -28134,7 +28134,7 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N }; simd_shr(a, vdup_n_u64(n as _)) } @@ -28150,7 +28150,7 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N }; simd_shr(a, vdupq_n_u64(n as _)) } @@ -28166,7 +28166,7 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_s16(N as _))) } @@ -28181,7 +28181,7 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_s32(N as _))) } @@ -28196,7 +28196,7 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_s64(N as _))) } @@ -28211,7 +28211,7 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_u16(N as _))) } @@ -28226,7 +28226,7 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_u32(N as _))) } @@ -28241,7 +28241,7 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_u64(N as _))) } @@ -28256,7 +28256,7 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vshr_n_s8::(b)) } @@ -28271,7 +28271,7 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vshrq_n_s8::(b)) } @@ -28286,7 +28286,7 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vshr_n_s16::(b)) } @@ -28301,7 +28301,7 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vshrq_n_s16::(b)) } @@ -28316,7 +28316,7 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vshr_n_s32::(b)) } @@ -28331,7 +28331,7 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[rustc_legacy_const_generics(2)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vshrq_n_s32::(b)) } @@ -28346,7 +28346,7 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vshr_n_s64::(b)) } @@ -28361,7 +28361,7 @@ pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vshrq_n_s64::(b)) } @@ -28376,7 +28376,7 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vshr_n_u8::(b)) } @@ -28391,7 +28391,7 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N : i32 where N >= 1 && N <= 8); + static_assert!(N >= 1 && N <= 8); simd_add(a, vshrq_n_u8::(b)) } @@ -28406,7 +28406,7 @@ pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vshr_n_u16::(b)) } @@ -28421,7 +28421,7 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(N : i32 where N >= 1 && N <= 16); + static_assert!(N >= 1 && N <= 16); simd_add(a, vshrq_n_u16::(b)) } @@ -28436,7 +28436,7 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vshr_n_u32::(b)) } @@ -28451,7 +28451,7 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N : i32 where N >= 1 && N <= 32); + static_assert!(N >= 1 && N <= 32); simd_add(a, vshrq_n_u32::(b)) } @@ -28466,7 +28466,7 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vshr_n_u64::(b)) } @@ -28481,7 +28481,7 @@ pub unsafe fn 
vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N : i32 where N >= 1 && N <= 64); + static_assert!(N >= 1 && N <= 64); simd_add(a, vshrq_n_u64::(b)) } diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index c7fc74b2a637..92326596645c 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -975,7 +975,7 @@ extern "unadjusted" { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) } @@ -991,7 +991,7 @@ pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> in stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); simd_insert(src, LANE as u32, *ptr) } @@ -1007,7 +1007,7 @@ pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) } @@ -1023,7 +1023,7 @@ pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) } @@ -1039,7 +1039,7 @@ pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: 
int16x8_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1055,7 +1055,7 @@ pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) } @@ -1071,7 +1071,7 @@ pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) } @@ -1087,7 +1087,7 @@ pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1103,7 +1103,7 @@ pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) } @@ -1119,7 +1119,7 @@ pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> u stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); simd_insert(src, LANE as u32, *ptr) } @@ -1135,7 +1135,7 @@ pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> stable(feature = "neon_intrinsics", 
since = "1.59.0") )] pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) } @@ -1151,7 +1151,7 @@ pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) } @@ -1167,7 +1167,7 @@ pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1183,7 +1183,7 @@ pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) } @@ -1199,7 +1199,7 @@ pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) } @@ -1215,7 +1215,7 @@ pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1231,7 +1231,7 @@ pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn 
vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) } @@ -1247,7 +1247,7 @@ pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> p stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { - static_assert_imm4!(LANE); + static_assert_uimm_bits!(LANE, 4); simd_insert(src, LANE as u32, *ptr) } @@ -1263,7 +1263,7 @@ pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) } @@ -1279,7 +1279,7 @@ pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { - static_assert_imm3!(LANE); + static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) } @@ -1297,7 +1297,7 @@ pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { - static_assert!(LANE : i32 where LANE == 0); + static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) } @@ -1315,7 +1315,7 @@ pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) - stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1331,7 +1331,7 @@ pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1_lane_f32(ptr: *const f32, src: 
float32x2_t) -> float32x2_t { - static_assert_imm1!(LANE); + static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) } @@ -1347,7 +1347,7 @@ pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) -> float32x4_t { - static_assert_imm2!(LANE); + static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) } @@ -4655,7 +4655,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -4670,7 +4670,7 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { - static_assert!(IMM5 : i32 where IMM5 == 0); + static_assert!(IMM5 == 0); simd_extract(v, 0) } @@ -4685,7 +4685,7 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { - static_assert_imm2!(IMM5); + static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) } @@ -4700,7 +4700,7 @@ pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { - static_assert_imm2!(IMM5); + static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) } @@ -4715,7 +4715,7 @@ pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { - static_assert_imm2!(IMM5); + static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) } @@ -4730,7 +4730,7 @@ pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { stable(feature = "neon_intrinsics", since = 
"1.59.0") )] pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -4745,7 +4745,7 @@ pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -4760,7 +4760,7 @@ pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -4775,7 +4775,7 @@ pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { - static_assert_imm2!(IMM5); + static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) } @@ -4790,7 +4790,7 @@ pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { - static_assert!(IMM5 : i32 where IMM5 == 0); + static_assert!(IMM5 == 0); simd_extract(v, IMM5 as u32) } @@ -4805,7 +4805,7 @@ pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -4820,7 +4820,7 @@ pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { - static_assert!(IMM5 : i32 where IMM5 == 0); + static_assert!(IMM5 == 0); simd_extract(v, IMM5 as u32) } @@ -4835,7 +4835,7 @@ pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub 
unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { - static_assert_imm1!(IMM5); + static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) } @@ -4850,7 +4850,7 @@ pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { - static_assert_imm3!(IMM5); + static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) } @@ -4865,7 +4865,7 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { - static_assert_imm2!(IMM5); + static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) } @@ -4880,7 +4880,7 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { - static_assert_imm3!(IMM5); + static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) } @@ -4895,7 +4895,7 @@ pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { - static_assert_imm3!(IMM5); + static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) } @@ -4910,7 +4910,7 @@ pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { - static_assert_imm2!(IMM5); + static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) } @@ -4925,7 +4925,7 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { - static_assert_imm3!(IMM5); + static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) } @@ -4940,7 +4940,7 @@ pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_s8(v: int8x8_t) -> 
i8 { - static_assert_imm3!(IMM5); + static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) } @@ -4955,7 +4955,7 @@ pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { - static_assert_imm3!(IMM5); + static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) } @@ -4970,7 +4970,7 @@ pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { - static_assert_imm4!(IMM5); + static_assert_uimm_bits!(IMM5, 4); simd_extract(v, IMM5 as u32) } @@ -4985,7 +4985,7 @@ pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { - static_assert_imm4!(IMM5); + static_assert_uimm_bits!(IMM5, 4); simd_extract(v, IMM5 as u32) } @@ -5000,7 +5000,7 @@ pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { - static_assert_imm4!(IMM5); + static_assert_uimm_bits!(IMM5, 4); simd_extract(v, IMM5 as u32) } @@ -6000,7 +6000,7 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } @@ -6016,7 +6016,7 @@ pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { stable(feature = "neon_intrinsics", since = "1.59.0") )] pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { - static_assert!(N : i32 where N == 0); + static_assert!(N == 0); a } diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index cc9783318657..0c86a24ca09f 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ 
b/library/stdarch/crates/core_arch/src/macros.rs @@ -1,81 +1,51 @@ //! Utility macros. -// Helper struct used to trigger const eval errors when the const generic immediate value `IMM` is -// out of `[MIN-MAX]` range. -pub(crate) struct ValidateConstImm; -impl ValidateConstImm { - pub(crate) const VALID: () = { - assert!(IMM >= MIN && IMM <= MAX, "IMM value not in expected range"); - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm1 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 1) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm2 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 2) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm3 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 3) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm4 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 4) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm5 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 5) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm6 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 6) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm8 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 8) - 1 }>::VALID; - }; -} - -#[allow(unused_macros)] -macro_rules! static_assert_imm16 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 16) - 1 }>::VALID; - }; -} - #[allow(unused)] macro_rules! 
static_assert { - ($imm:ident : $ty:ty where $e:expr) => {{ - struct Validate(); - impl Validate<$imm> { - const VALID: () = { - assert!($e, concat!("Assertion failed: ", stringify!($e))); - }; + ($e:expr) => { + const { + assert!($e); } - let _ = Validate::<$imm>::VALID; - }}; + }; + ($e:expr, $msg:expr) => { + const { + assert!($e, $msg); + } + }; +} + +#[allow(unused_macros)] +macro_rules! static_assert_uimm_bits { + ($imm:ident, $bits:expr) => { + // `0 <= $imm` produces a warning if the immediate has an unsigned type + #[allow(unused_comparisons)] + { + static_assert!( + 0 <= $imm && $imm <= (1 << $bits) - 1, + concat!( + stringify!($imm), + " doesn't fit in ", + stringify!($bits), + " bits", + ) + ) + } + }; +} + +#[allow(unused_macros)] +macro_rules! static_assert_simm_bits { + ($imm:ident, $bits:expr) => { + static_assert!( + (-1 << ($bits - 1)) - 1 < $imm && $imm < (1 << ($bits - 1)), + concat!( + stringify!($imm), + " doesn't fit in ", + stringify!($bits), + " bits", + ) + ) + }; } #[allow(unused)] diff --git a/library/stdarch/crates/core_arch/src/mips/msa.rs b/library/stdarch/crates/core_arch/src/mips/msa.rs index cded73a544cf..3e93db85e2d1 100644 --- a/library/stdarch/crates/core_arch/src/mips/msa.rs +++ b/library/stdarch/crates/core_arch/src/mips/msa.rs @@ -10,9 +10,6 @@ use stdarch_test::assert_instr; use crate::mem; -#[macro_use] -mod macros; - types! { /// MIPS-specific 128-bit wide vector of 16 packed `i8`.
pub struct v16i8( @@ -1413,7 +1410,7 @@ pub unsafe fn __msa_addv_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(addvi.b, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_addvi_b(a: v16i8) -> v16i8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_addvi_b(a, IMM5) } @@ -1428,7 +1425,7 @@ pub unsafe fn __msa_addvi_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(addvi.h, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_addvi_h(a: v8i16) -> v8i16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_addvi_h(a, IMM5) } @@ -1443,7 +1440,7 @@ pub unsafe fn __msa_addvi_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(addvi.w, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_addvi_w(a: v4i32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_addvi_w(a, IMM5) } @@ -1458,7 +1455,7 @@ pub unsafe fn __msa_addvi_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(addvi.d, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_addvi_d(a: v2i64) -> v2i64 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_addvi_d(a, IMM5) } @@ -1487,7 +1484,7 @@ pub unsafe fn __msa_and_v(a: v16u8, b: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(andi.b, imm8 = 0b10010111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_andi_b(a: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_andi_b(a, IMM8) } @@ -1910,7 +1907,7 @@ pub unsafe fn __msa_bclr_d(a: v2u64, b: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(bclri.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bclri_b(a: v16u8) -> v16u8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_bclri_b(a, IMM3) } @@ -1925,7 +1922,7 @@ pub unsafe fn __msa_bclri_b(a: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(bclri.h, imm4 = 0b1111))] 
#[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bclri_h(a: v8u16) -> v8u16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_bclri_h(a, IMM4) } @@ -1940,7 +1937,7 @@ pub unsafe fn __msa_bclri_h(a: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(bclri.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bclri_w(a: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_bclri_w(a, IMM5) } @@ -1955,7 +1952,7 @@ pub unsafe fn __msa_bclri_w(a: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(bclri.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bclri_d(a: v2u64) -> v2u64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_bclri_d(a, IMM6) } @@ -2026,7 +2023,7 @@ pub unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8) -> v16u8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_binsli_b(a, mem::transmute(b), IMM3) } @@ -2041,7 +2038,7 @@ pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16) -> v8u16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_binsli_h(a, mem::transmute(b), IMM4) } @@ -2056,7 +2053,7 @@ pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_binsli_w(a, mem::transmute(b), IMM5) } @@ -2071,7 +2068,7 @@ pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn 
__msa_binsli_d(a: v2u64, b: v2u64) -> v2u64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_binsli_d(a, mem::transmute(b), IMM6) } @@ -2142,7 +2139,7 @@ pub unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8) -> v16u8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_binsri_b(a, mem::transmute(b), IMM3) } @@ -2157,7 +2154,7 @@ pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16) -> v8u16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_binsri_h(a, mem::transmute(b), IMM4) } @@ -2172,7 +2169,7 @@ pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_binsri_w(a, mem::transmute(b), IMM5) } @@ -2187,7 +2184,7 @@ pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_binsri_d(a: v2u64, b: v2u64) -> v2u64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_binsri_d(a, mem::transmute(b), IMM6) } @@ -2216,7 +2213,7 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_bmnzi_b(a: v16u8, b: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_bmnzi_b(a, mem::transmute(b), IMM8) } @@ -2245,7 +2242,7 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[cfg_attr(test, 
assert_instr(bmzi.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_bmzi_b(a: v16u8, b: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_bmzi_b(a, mem::transmute(b), IMM8) } @@ -2316,7 +2313,7 @@ pub unsafe fn __msa_bneg_d(a: v2u64, b: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(bnegi.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bnegi_b(a: v16u8) -> v16u8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_bnegi_b(a, IMM3) } @@ -2331,7 +2328,7 @@ pub unsafe fn __msa_bnegi_b(a: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(bnegi.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bnegi_h(a: v8u16) -> v8u16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_bnegi_h(a, IMM4) } @@ -2346,7 +2343,7 @@ pub unsafe fn __msa_bnegi_h(a: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(bnegi.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bnegi_w(a: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_bnegi_w(a, IMM5) } @@ -2361,7 +2358,7 @@ pub unsafe fn __msa_bnegi_w(a: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(bnegi.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bnegi_d(a: v2u64) -> v2u64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_bnegi_d(a, IMM6) } @@ -2446,7 +2443,7 @@ pub unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_bseli_b(a: v16u8, b: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_bseli_b(a, mem::transmute(b), IMM8) } @@ -2517,7 +2514,7 @@ pub unsafe fn __msa_bset_d(a: v2u64, b: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(bseti.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bseti_b(a: 
v16u8) -> v16u8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_bseti_b(a, IMM3) } @@ -2532,7 +2529,7 @@ pub unsafe fn __msa_bseti_b(a: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(bseti.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bseti_h(a: v8u16) -> v8u16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_bseti_h(a, IMM4) } @@ -2547,7 +2544,7 @@ pub unsafe fn __msa_bseti_h(a: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(bseti.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bseti_w(a: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_bseti_w(a, IMM5) } @@ -2562,7 +2559,7 @@ pub unsafe fn __msa_bseti_w(a: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(bseti.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_bseti_d(a: v2u64) -> v2u64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_bseti_d(a, IMM6) } @@ -2685,7 +2682,7 @@ pub unsafe fn __msa_ceq_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(ceqi.b, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ceqi_b(a: v16i8) -> v16i8 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_b(a, IMM_S5) } @@ -2700,7 +2697,7 @@ pub unsafe fn __msa_ceqi_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(ceqi.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ceqi_h(a: v8i16) -> v8i16 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_h(a, IMM_S5) } @@ -2715,7 +2712,7 @@ pub unsafe fn __msa_ceqi_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(ceqi.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ceqi_w(a: v4i32) -> v4i32 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_w(a, IMM_S5) } @@ -2730,7 +2727,7 @@ pub unsafe fn __msa_ceqi_w(a: v4i32) -> 
v4i32 { #[cfg_attr(test, assert_instr(ceqi.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ceqi_d(a: v2i64) -> v2i64 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_d(a, IMM_S5) } @@ -2744,7 +2741,7 @@ pub unsafe fn __msa_ceqi_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(cfcmsa, imm5 = 0b11111))] #[rustc_legacy_const_generics(0)] pub unsafe fn __msa_cfcmsa() -> i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_cfcmsa(IMM5) } @@ -2872,7 +2869,7 @@ pub unsafe fn __msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64 { #[cfg_attr(test, assert_instr(clei_s.b, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_s_b(a: v16i8) -> v16i8 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_b(a, IMM_S5) } @@ -2888,7 +2885,7 @@ pub unsafe fn __msa_clei_s_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(clei_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_s_h(a: v8i16) -> v8i16 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_h(a, IMM_S5) } @@ -2904,7 +2901,7 @@ pub unsafe fn __msa_clei_s_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(clei_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_s_w(a: v4i32) -> v4i32 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_w(a, IMM_S5) } @@ -2920,7 +2917,7 @@ pub unsafe fn __msa_clei_s_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(clei_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_s_d(a: v2i64) -> v2i64 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_d(a, IMM_S5) } @@ -2936,7 +2933,7 @@ pub unsafe fn __msa_clei_s_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(clei_u.b, imm5 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_u_b(a: v16u8) -> 
v16i8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clei_u_b(a, IMM5) } @@ -2952,7 +2949,7 @@ pub unsafe fn __msa_clei_u_b(a: v16u8) -> v16i8 { #[cfg_attr(test, assert_instr(clei_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_u_h(a: v8u16) -> v8i16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clei_u_h(a, IMM5) } @@ -2968,7 +2965,7 @@ pub unsafe fn __msa_clei_u_h(a: v8u16) -> v8i16 { #[cfg_attr(test, assert_instr(clei_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_u_w(a: v4u32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clei_u_w(a, IMM5) } @@ -2984,7 +2981,7 @@ pub unsafe fn __msa_clei_u_w(a: v4u32) -> v4i32 { #[cfg_attr(test, assert_instr(clei_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clei_u_d(a: v2u64) -> v2i64 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clei_u_d(a, IMM5) } @@ -3112,7 +3109,7 @@ pub unsafe fn __msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64 { #[cfg_attr(test, assert_instr(clti_s.b, imm_s5 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_s_b(a: v16i8) -> v16i8 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_b(a, IMM_S5) } @@ -3128,7 +3125,7 @@ pub unsafe fn __msa_clti_s_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(clti_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_s_h(a: v8i16) -> v8i16 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_h(a, IMM_S5) } @@ -3144,7 +3141,7 @@ pub unsafe fn __msa_clti_s_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(clti_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_s_w(a: v4i32) -> v4i32 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_w(a, IMM_S5) } @@ -3160,7 +3157,7 @@ pub unsafe fn 
__msa_clti_s_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(clti_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_s_d(a: v2i64) -> v2i64 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_d(a, IMM_S5) } @@ -3176,7 +3173,7 @@ pub unsafe fn __msa_clti_s_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(clti_u.b, imm5 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_u_b(a: v16u8) -> v16i8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clti_u_b(a, IMM5) } @@ -3192,7 +3189,7 @@ pub unsafe fn __msa_clti_u_b(a: v16u8) -> v16i8 { #[cfg_attr(test, assert_instr(clti_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_u_h(a: v8u16) -> v8i16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clti_u_h(a, IMM5) } @@ -3208,7 +3205,7 @@ pub unsafe fn __msa_clti_u_h(a: v8u16) -> v8i16 { #[cfg_attr(test, assert_instr(clti_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_u_w(a: v4u32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clti_u_w(a, IMM5) } @@ -3224,7 +3221,7 @@ pub unsafe fn __msa_clti_u_w(a: v4u32) -> v4i32 { #[cfg_attr(test, assert_instr(clti_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_clti_u_d(a: v2u64) -> v2i64 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_clti_u_d(a, IMM5) } @@ -3238,7 +3235,7 @@ pub unsafe fn __msa_clti_u_d(a: v2u64) -> v2i64 { #[cfg_attr(test, assert_instr(copy_s.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_s_b(a: v16i8) -> i32 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_copy_s_b(a, IMM4) } @@ -3252,7 +3249,7 @@ pub unsafe fn __msa_copy_s_b(a: v16i8) -> i32 { #[cfg_attr(test, assert_instr(copy_s.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_s_h(a: v8i16) -> i32 
{ - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_copy_s_h(a, IMM3) } @@ -3266,7 +3263,7 @@ pub unsafe fn __msa_copy_s_h(a: v8i16) -> i32 { #[cfg_attr(test, assert_instr(copy_s.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_s_w(a: v4i32) -> i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_copy_s_w(a, IMM2) } @@ -3280,7 +3277,7 @@ pub unsafe fn __msa_copy_s_w(a: v4i32) -> i32 { #[cfg_attr(test, assert_instr(copy_s.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_s_d(a: v2i64) -> i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_copy_s_d(a, IMM1) } @@ -3294,7 +3291,7 @@ pub unsafe fn __msa_copy_s_d(a: v2i64) -> i64 { #[cfg_attr(test, assert_instr(copy_u.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_u_b(a: v16i8) -> u32 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_copy_u_b(a, IMM4) } @@ -3308,7 +3305,7 @@ pub unsafe fn __msa_copy_u_b(a: v16i8) -> u32 { #[cfg_attr(test, assert_instr(copy_u.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_u_h(a: v8i16) -> u32 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_copy_u_h(a, IMM3) } @@ -3322,7 +3319,7 @@ pub unsafe fn __msa_copy_u_h(a: v8i16) -> u32 { #[cfg_attr(test, assert_instr(copy_u.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_u_w(a: v4i32) -> u32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_copy_u_w(a, IMM2) } @@ -3336,7 +3333,7 @@ pub unsafe fn __msa_copy_u_w(a: v4i32) -> u32 { #[cfg_attr(test, assert_instr(copy_u.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_copy_u_d(a: v2i64) -> u64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_copy_u_d(a, IMM1) } @@ -3352,7 +3349,7 @@ pub unsafe fn __msa_copy_u_d(a: v2i64) -> u64 { #[cfg_attr(test, assert_instr(ctcmsa, imm1 = 0b1))] 
#[rustc_legacy_const_generics(0)] pub unsafe fn __msa_ctcmsa(a: i32) -> () { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_ctcmsa(IMM5, a) } @@ -5568,7 +5565,7 @@ pub unsafe fn __msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(insert.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insert_b(a: v16i8, c: i32) -> v16i8 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_insert_b(a, IMM4, c) } @@ -5583,7 +5580,7 @@ pub unsafe fn __msa_insert_b(a: v16i8, c: i32) -> v16i8 { #[cfg_attr(test, assert_instr(insert.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insert_h(a: v8i16, c: i32) -> v8i16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_insert_h(a, IMM3, c) } @@ -5598,7 +5595,7 @@ pub unsafe fn __msa_insert_h(a: v8i16, c: i32) -> v8i16 { #[cfg_attr(test, assert_instr(insert.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insert_w(a: v4i32, c: i32) -> v4i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_insert_w(a, IMM2, c) } @@ -5613,7 +5610,7 @@ pub unsafe fn __msa_insert_w(a: v4i32, c: i32) -> v4i32 { #[cfg_attr(test, assert_instr(insert.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insert_d(a: v2i64, c: i64) -> v2i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_insert_d(a, IMM1, c) } @@ -5628,7 +5625,7 @@ pub unsafe fn __msa_insert_d(a: v2i64, c: i64) -> v2i64 { #[cfg_attr(test, assert_instr(insve.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insve_b(a: v16i8, c: v16i8) -> v16i8 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_insve_b(a, IMM4, c) } @@ -5643,7 +5640,7 @@ pub unsafe fn __msa_insve_b(a: v16i8, c: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(insve.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insve_h(a: v8i16, c: v8i16) -> v8i16 { - 
static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_insve_h(a, IMM3, c) } @@ -5658,7 +5655,7 @@ pub unsafe fn __msa_insve_h(a: v8i16, c: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(insve.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insve_w(a: v4i32, c: v4i32) -> v4i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_insve_w(a, IMM2, c) } @@ -5673,7 +5670,7 @@ pub unsafe fn __msa_insve_w(a: v4i32, c: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(insve.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_insve_d(a: v2i64, c: v2i64) -> v2i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_insve_d(a, IMM1, c) } @@ -5688,7 +5685,7 @@ pub unsafe fn __msa_insve_d(a: v2i64, c: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(ld.b, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ld_b(mem_addr: *mut u8) -> v16i8 { - static_assert_imm_s10!(IMM_S10); + static_assert_simm_bits!(IMM_S10, 10); msa_ld_b(mem_addr, IMM_S10) } @@ -5703,8 +5700,8 @@ pub unsafe fn __msa_ld_b(mem_addr: *mut u8) -> v16i8 { #[cfg_attr(test, assert_instr(ld.h, imm_s11 = 0b11111111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ld_h(mem_addr: *mut u8) -> v8i16 { - static_assert_imm_s11!(IMM_S11); - static_assert!(IMM_S11: i32 where IMM_S11 % 2 == 0); + static_assert_simm_bits!(IMM_S11, 11); + static_assert!(IMM_S11 % 2 == 0); msa_ld_h(mem_addr, IMM_S11) } @@ -5719,8 +5716,8 @@ pub unsafe fn __msa_ld_h(mem_addr: *mut u8) -> v8i16 { #[cfg_attr(test, assert_instr(ld.w, imm_s12 = 0b111111111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ld_w(mem_addr: *mut u8) -> v4i32 { - static_assert_imm_s12!(IMM_S12); - static_assert!(IMM_S12: i32 where IMM_S12 % 4 == 0); + static_assert_simm_bits!(IMM_S12, 12); + static_assert!(IMM_S12 % 4 == 0); msa_ld_w(mem_addr, IMM_S12) } @@ -5735,8 +5732,8 @@ pub unsafe fn __msa_ld_w(mem_addr: *mut u8) -> v4i32 { 
#[cfg_attr(test, assert_instr(ld.d, imm_s13 = 0b1111111111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ld_d(mem_addr: *mut u8) -> v2i64 { - static_assert_imm_s13!(IMM_S13); - static_assert!(IMM_S13: i32 where IMM_S13 % 8 == 0); + static_assert_simm_bits!(IMM_S13, 13); + static_assert!(IMM_S13 % 8 == 0); msa_ld_d(mem_addr, IMM_S13) } @@ -5751,7 +5748,7 @@ pub unsafe fn __msa_ld_d(mem_addr: *mut u8) -> v2i64 { #[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] pub unsafe fn __msa_ldi_b() -> v16i8 { - static_assert_imm_s10!(IMM_S10); + static_assert_simm_bits!(IMM_S10, 10); msa_ldi_b(IMM_S10) } @@ -5766,7 +5763,7 @@ pub unsafe fn __msa_ldi_b() -> v16i8 { #[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] pub unsafe fn __msa_ldi_h() -> v8i16 { - static_assert_imm_s10!(IMM_S10); + static_assert_simm_bits!(IMM_S10, 10); msa_ldi_h(IMM_S10) } @@ -5781,7 +5778,7 @@ pub unsafe fn __msa_ldi_h() -> v8i16 { #[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] pub unsafe fn __msa_ldi_w() -> v4i32 { - static_assert_imm_s10!(IMM_S10); + static_assert_simm_bits!(IMM_S10, 10); msa_ldi_w(IMM_S10) } @@ -5796,7 +5793,7 @@ pub unsafe fn __msa_ldi_w() -> v4i32 { #[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] pub unsafe fn __msa_ldi_d() -> v2i64 { - static_assert_imm_s10!(IMM_S10); + static_assert_simm_bits!(IMM_S10, 10); msa_ldi_d(IMM_S10) } @@ -6087,7 +6084,7 @@ pub unsafe fn __msa_max_u_d(a: v2u64, b: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(maxi_s.b, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_s_b(a: v16i8) -> v16i8 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_b(a, IMM_S5) } @@ -6102,7 +6099,7 @@ pub unsafe fn __msa_maxi_s_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(maxi_s.h, imm_s5 = 0b11111))] 
#[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_s_h(a: v8i16) -> v8i16 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_h(a, IMM_S5) } @@ -6117,7 +6114,7 @@ pub unsafe fn __msa_maxi_s_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(maxi_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_s_w(a: v4i32) -> v4i32 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_w(a, IMM_S5) } @@ -6132,7 +6129,7 @@ pub unsafe fn __msa_maxi_s_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(maxi_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_s_d(a: v2i64) -> v2i64 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_d(a, IMM_S5) } @@ -6147,7 +6144,7 @@ pub unsafe fn __msa_maxi_s_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(maxi_u.b, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_u_b(a: v16u8) -> v16u8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_b(a, IMM5) } @@ -6162,7 +6159,7 @@ pub unsafe fn __msa_maxi_u_b(a: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(maxi_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_u_h(a: v8u16) -> v8u16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_h(a, IMM5) } @@ -6177,7 +6174,7 @@ pub unsafe fn __msa_maxi_u_h(a: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(maxi_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_u_w(a: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_w(a, IMM5) } @@ -6192,7 +6189,7 @@ pub unsafe fn __msa_maxi_u_w(a: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(maxi_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_maxi_u_d(a: v2u64) -> v2u64 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 
5); msa_maxi_u_d(a, IMM5) } @@ -6315,7 +6312,7 @@ pub unsafe fn __msa_min_s_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(mini_s.b, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_s_b(a: v16i8) -> v16i8 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_b(a, IMM_S5) } @@ -6330,7 +6327,7 @@ pub unsafe fn __msa_mini_s_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(mini_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_s_h(a: v8i16) -> v8i16 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_h(a, IMM_S5) } @@ -6345,7 +6342,7 @@ pub unsafe fn __msa_mini_s_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(mini_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_s_w(a: v4i32) -> v4i32 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_w(a, IMM_S5) } @@ -6360,7 +6357,7 @@ pub unsafe fn __msa_mini_s_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(mini_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_s_d(a: v2i64) -> v2i64 { - static_assert_imm_s5!(IMM_S5); + static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_d(a, IMM_S5) } @@ -6427,7 +6424,7 @@ pub unsafe fn __msa_min_u_d(a: v2u64, b: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(mini_u.b, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_u_b(a: v16u8) -> v16u8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_mini_u_b(a, IMM5) } @@ -6442,7 +6439,7 @@ pub unsafe fn __msa_mini_u_b(a: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(mini_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_u_h(a: v8u16) -> v8u16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_mini_u_h(a, IMM5) } @@ -6457,7 +6454,7 @@ pub unsafe fn __msa_mini_u_h(a: v8u16) -> v8u16 { #[cfg_attr(test, 
assert_instr(mini_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_u_w(a: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_mini_u_w(a, IMM5) } @@ -6472,7 +6469,7 @@ pub unsafe fn __msa_mini_u_w(a: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(mini_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_mini_u_d(a: v2u64) -> v2u64 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_mini_u_d(a, IMM5) } @@ -6958,7 +6955,7 @@ pub unsafe fn __msa_nor_v(a: v16u8, b: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(nori.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_nori_b(a: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_nori_b(a, IMM8) } @@ -6988,7 +6985,7 @@ pub unsafe fn __msa_or_v(a: v16u8, b: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(ori.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_ori_b(a: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_ori_b(a, IMM8) } @@ -7155,7 +7152,7 @@ pub unsafe fn __msa_pcnt_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(sat_s.b, imm4 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_s_b(a: v16i8) -> v16i8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_sat_s_b(a, IMM3) } @@ -7170,7 +7167,7 @@ pub unsafe fn __msa_sat_s_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(sat_s.h, imm3 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_s_h(a: v8i16) -> v8i16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_sat_s_h(a, IMM4) } @@ -7185,7 +7182,7 @@ pub unsafe fn __msa_sat_s_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(sat_s.w, imm2 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_s_w(a: v4i32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 
5); msa_sat_s_w(a, IMM5) } @@ -7200,7 +7197,7 @@ pub unsafe fn __msa_sat_s_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(sat_s.d, imm1 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_s_d(a: v2i64) -> v2i64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_sat_s_d(a, IMM6) } @@ -7215,7 +7212,7 @@ pub unsafe fn __msa_sat_s_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(sat_u.b, imm4 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_u_b(a: v16u8) -> v16u8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_sat_u_b(a, IMM3) } @@ -7230,7 +7227,7 @@ pub unsafe fn __msa_sat_u_b(a: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(sat_u.h, imm3 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_u_h(a: v8u16) -> v8u16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_sat_u_h(a, IMM4) } @@ -7245,7 +7242,7 @@ pub unsafe fn __msa_sat_u_h(a: v8u16) -> v8u16 { #[cfg_attr(test, assert_instr(sat_u.w, imm2 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_u_w(a: v4u32) -> v4u32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_sat_u_w(a, IMM5) } @@ -7260,7 +7257,7 @@ pub unsafe fn __msa_sat_u_w(a: v4u32) -> v4u32 { #[cfg_attr(test, assert_instr(sat_u.d, imm1 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_sat_u_d(a: v2u64) -> v2u64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_sat_u_d(a, IMM6) } @@ -7276,7 +7273,7 @@ pub unsafe fn __msa_sat_u_d(a: v2u64) -> v2u64 { #[cfg_attr(test, assert_instr(shf.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_shf_b(a: v16i8) -> v16i8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_shf_b(a, IMM8) } @@ -7292,7 +7289,7 @@ pub unsafe fn __msa_shf_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(shf.h, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn 
__msa_shf_h(a: v8i16) -> v8i16 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_shf_h(a, IMM8) } @@ -7308,7 +7305,7 @@ pub unsafe fn __msa_shf_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(shf.w, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_shf_w(a: v4i32) -> v4i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_shf_w(a, IMM8) } @@ -7408,7 +7405,7 @@ pub unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 { #[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8) -> v16i8 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_sldi_b(a, mem::transmute(b), IMM4) } @@ -7428,7 +7425,7 @@ pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16) -> v8i16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_sldi_h(a, mem::transmute(b), IMM3) } @@ -7448,7 +7445,7 @@ pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32) -> v4i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_sldi_w(a, mem::transmute(b), IMM2) } @@ -7468,7 +7465,7 @@ pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_sldi_d(a: v2i64, b: v2i64) -> v2i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_sldi_d(a, mem::transmute(b), IMM1) } @@ -7539,7 +7536,7 @@ pub unsafe fn __msa_sll_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(slli.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_slli_b(a: v16i8) -> v16i8 { - static_assert_imm4!(IMM4); + 
static_assert_uimm_bits!(IMM4, 4); msa_slli_b(a, IMM4) } @@ -7554,7 +7551,7 @@ pub unsafe fn __msa_slli_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(slli.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_slli_h(a: v8i16) -> v8i16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_slli_h(a, IMM3) } @@ -7569,7 +7566,7 @@ pub unsafe fn __msa_slli_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(slli.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_slli_w(a: v4i32) -> v4i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_slli_w(a, IMM2) } @@ -7584,7 +7581,7 @@ pub unsafe fn __msa_slli_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(slli.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_slli_d(a: v2i64) -> v2i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_slli_d(a, IMM1) } @@ -7654,7 +7651,7 @@ pub unsafe fn __msa_splat_d(a: v2i64, b: i32) -> v2i64 { #[cfg_attr(test, assert_instr(splati.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_splati_b(a: v16i8) -> v16i8 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_splati_b(a, IMM4) } @@ -7668,7 +7665,7 @@ pub unsafe fn __msa_splati_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(splati.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_splati_h(a: v8i16) -> v8i16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_splati_h(a, IMM3) } @@ -7682,7 +7679,7 @@ pub unsafe fn __msa_splati_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(splati.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_splati_w(a: v4i32) -> v4i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_splati_w(a, IMM2) } @@ -7696,7 +7693,7 @@ pub unsafe fn __msa_splati_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(splati.d, imm1 = 0b1))] 
#[rustc_legacy_const_generics(1)] pub unsafe fn __msa_splati_d(a: v2i64) -> v2i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_splati_d(a, IMM1) } @@ -7767,7 +7764,7 @@ pub unsafe fn __msa_sra_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(srai.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srai_b(a: v16i8) -> v16i8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_srai_b(a, IMM3) } @@ -7782,7 +7779,7 @@ pub unsafe fn __msa_srai_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(srai.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srai_h(a: v8i16) -> v8i16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_srai_h(a, IMM4) } @@ -7797,7 +7794,7 @@ pub unsafe fn __msa_srai_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(srai.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srai_w(a: v4i32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_srai_w(a, IMM5) } @@ -7812,7 +7809,7 @@ pub unsafe fn __msa_srai_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(srai.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srai_d(a: v2i64) -> v2i64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_srai_d(a, IMM6) } @@ -7888,7 +7885,7 @@ pub unsafe fn __msa_srar_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(srari.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srari_b(a: v16i8) -> v16i8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_srari_b(a, IMM3) } @@ -7904,7 +7901,7 @@ pub unsafe fn __msa_srari_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(srari.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srari_h(a: v8i16) -> v8i16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_srari_h(a, IMM4) } @@ -7920,7 +7917,7 @@ pub 
unsafe fn __msa_srari_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(srari.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srari_w(a: v4i32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_srari_w(a, IMM5) } @@ -7936,7 +7933,7 @@ pub unsafe fn __msa_srari_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(srari.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srari_d(a: v2i64) -> v2i64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_srari_d(a, IMM6) } @@ -8007,7 +8004,7 @@ pub unsafe fn __msa_srl_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(srli.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srli_b(a: v16i8) -> v16i8 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_srli_b(a, IMM4) } @@ -8022,7 +8019,7 @@ pub unsafe fn __msa_srli_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(srli.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srli_h(a: v8i16) -> v8i16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_srli_h(a, IMM3) } @@ -8037,7 +8034,7 @@ pub unsafe fn __msa_srli_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(srli.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srli_w(a: v4i32) -> v4i32 { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); msa_srli_w(a, IMM2) } @@ -8052,7 +8049,7 @@ pub unsafe fn __msa_srli_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(srli.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srli_d(a: v2i64) -> v2i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); msa_srli_d(a, IMM1) } @@ -8128,7 +8125,7 @@ pub unsafe fn __msa_srlr_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(srlri.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srlri_b(a: v16i8) -> v16i8 { - 
static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); msa_srlri_b(a, IMM3) } @@ -8144,7 +8141,7 @@ pub unsafe fn __msa_srlri_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(srlri.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srlri_h(a: v8i16) -> v8i16 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); msa_srlri_h(a, IMM4) } @@ -8160,7 +8157,7 @@ pub unsafe fn __msa_srlri_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(srlri.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srlri_w(a: v4i32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_srlri_w(a, IMM5) } @@ -8176,7 +8173,7 @@ pub unsafe fn __msa_srlri_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(srlri.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_srlri_d(a: v2i64) -> v2i64 { - static_assert_imm6!(IMM6); + static_assert_uimm_bits!(IMM6, 6); msa_srlri_d(a, IMM6) } @@ -8191,7 +8188,7 @@ pub unsafe fn __msa_srlri_d(a: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8) -> () { - static_assert_imm_s10!(IMM_S10); + static_assert_simm_bits!(IMM_S10, 10); msa_st_b(a, mem_addr, IMM_S10) } @@ -8206,8 +8203,8 @@ pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8) -> () #[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8) -> () { - static_assert_imm_s11!(IMM_S11); - static_assert!(IMM_S11: i32 where IMM_S11 % 2 == 0); + static_assert_simm_bits!(IMM_S11, 11); + static_assert!(IMM_S11 % 2 == 0); msa_st_h(a, mem_addr, IMM_S11) } @@ -8222,8 +8219,8 @@ pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8) -> () #[cfg_attr(test, assert_instr(st.w, imm_s12 = 0b111111111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8) 
-> () { - static_assert_imm_s12!(IMM_S12); - static_assert!(IMM_S12: i32 where IMM_S12 % 4 == 0); + static_assert_simm_bits!(IMM_S12, 12); + static_assert!(IMM_S12 % 4 == 0); msa_st_w(a, mem_addr, IMM_S12) } @@ -8238,8 +8235,8 @@ pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8) -> () #[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))] #[rustc_legacy_const_generics(2)] pub unsafe fn __msa_st_d(a: v2i64, mem_addr: *mut u8) -> () { - static_assert_imm_s13!(IMM_S13); - static_assert!(IMM_S13: i32 where IMM_S13 % 8 == 0); + static_assert_simm_bits!(IMM_S13, 13); + static_assert!(IMM_S13 % 8 == 0); msa_st_d(a, mem_addr, IMM_S13) } @@ -8530,7 +8527,7 @@ pub unsafe fn __msa_subv_d(a: v2i64, b: v2i64) -> v2i64 { #[cfg_attr(test, assert_instr(subvi.b, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_subvi_b(a: v16i8) -> v16i8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_subvi_b(a, IMM5) } @@ -8545,7 +8542,7 @@ pub unsafe fn __msa_subvi_b(a: v16i8) -> v16i8 { #[cfg_attr(test, assert_instr(subvi.h, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_subvi_h(a: v8i16) -> v8i16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_subvi_h(a, IMM5) } @@ -8560,7 +8557,7 @@ pub unsafe fn __msa_subvi_h(a: v8i16) -> v8i16 { #[cfg_attr(test, assert_instr(subvi.w, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_subvi_w(a: v4i32) -> v4i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_subvi_w(a, IMM5) } @@ -8575,7 +8572,7 @@ pub unsafe fn __msa_subvi_w(a: v4i32) -> v4i32 { #[cfg_attr(test, assert_instr(subvi.d, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] pub unsafe fn __msa_subvi_d(a: v2i64) -> v2i64 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); msa_subvi_d(a, IMM5) } @@ -8673,7 +8670,7 @@ pub unsafe fn __msa_xor_v(a: v16u8, b: v16u8) -> v16u8 { #[cfg_attr(test, assert_instr(xori.b, imm8 = 0b11111111))] 
#[rustc_legacy_const_generics(1)] pub unsafe fn __msa_xori_b(a: v16u8) -> v16u8 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); msa_xori_b(a, IMM8) } diff --git a/library/stdarch/crates/core_arch/src/mips/msa/macros.rs b/library/stdarch/crates/core_arch/src/mips/msa/macros.rs deleted file mode 100644 index de8905840d71..000000000000 --- a/library/stdarch/crates/core_arch/src/mips/msa/macros.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Utility macros. - -macro_rules! static_assert_imm_s5 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -16, 15>::VALID; - }; -} - -macro_rules! static_assert_imm_s10 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -512, 511>::VALID; - }; -} - -macro_rules! static_assert_imm_s11 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -1024, 1023>::VALID; - }; -} - -macro_rules! static_assert_imm_s12 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -2048, 2047>::VALID; - }; -} - -macro_rules! 
static_assert_imm_s13 { - ($imm:ident) => { - let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -4096, 4095>::VALID; - }; -} diff --git a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs index cab6623e2397..283a7e5ceb66 100644 --- a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs +++ b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs @@ -80,7 +80,7 @@ pub unsafe fn vec_xxpermdi(a: T, b: T) -> T where T: sealed::VectorPermDI, { - static_assert_imm2!(DM); + static_assert_uimm_bits!(DM, 2); a.vec_xxpermdi(b, DM as u8) } diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs index a41b88f6127a..ed021df5a91b 100644 --- a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs +++ b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs @@ -691,7 +691,7 @@ pub fn sm3p1(x: u32) -> u32 { #[inline] #[target_feature(enable = "zksed")] pub fn sm4ed(x: u32, a: u32) -> u32 { - static_assert!(BS: u8 where BS <= 3); + static_assert!(BS <= 3); let ans: u32; unsafe { asm!("sm4ed {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) a, const BS, options(pure, nomem, nostack)) @@ -751,7 +751,7 @@ pub fn sm4ed(x: u32, a: u32) -> u32 { #[inline] #[target_feature(enable = "zksed")] pub fn sm4ks(x: u32, k: u32) -> u32 { - static_assert!(BS: u8 where BS <= 3); + static_assert!(BS <= 3); let ans: u32; unsafe { asm!("sm4ks {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) k, const BS, options(pure, nomem, nostack)) diff --git a/library/stdarch/crates/core_arch/src/wasm32/memory.rs b/library/stdarch/crates/core_arch/src/wasm32/memory.rs index b5cf13e9848f..882e068152aa 100644 --- a/library/stdarch/crates/core_arch/src/wasm32/memory.rs +++ b/library/stdarch/crates/core_arch/src/wasm32/memory.rs @@ -26,7 +26,7 @@ extern "C" { #[stable(feature = "simd_wasm32", since = "1.33.0")] #[doc(alias("memory.size"))] pub fn memory_size() -> usize { - 
static_assert!(MEM: u32 where MEM == 0); + static_assert!(MEM == 0); unsafe { llvm_memory_size(MEM) } } @@ -52,7 +52,7 @@ pub fn memory_size() -> usize { #[doc(alias("memory.grow"))] pub fn memory_grow(delta: usize) -> usize { unsafe { - static_assert!(MEM: u32 where MEM == 0); + static_assert!(MEM == 0); llvm_memory_grow(MEM, delta) } } diff --git a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs index ed3c40c200ef..01d09caa6c92 100644 --- a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs +++ b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs @@ -937,22 +937,22 @@ pub fn i8x16_shuffle< a: v128, b: v128, ) -> v128 { - static_assert!(I0: usize where I0 < 32); - static_assert!(I1: usize where I1 < 32); - static_assert!(I2: usize where I2 < 32); - static_assert!(I3: usize where I3 < 32); - static_assert!(I4: usize where I4 < 32); - static_assert!(I5: usize where I5 < 32); - static_assert!(I6: usize where I6 < 32); - static_assert!(I7: usize where I7 < 32); - static_assert!(I8: usize where I8 < 32); - static_assert!(I9: usize where I9 < 32); - static_assert!(I10: usize where I10 < 32); - static_assert!(I11: usize where I11 < 32); - static_assert!(I12: usize where I12 < 32); - static_assert!(I13: usize where I13 < 32); - static_assert!(I14: usize where I14 < 32); - static_assert!(I15: usize where I15 < 32); + static_assert!(I0 < 32); + static_assert!(I1 < 32); + static_assert!(I2 < 32); + static_assert!(I3 < 32); + static_assert!(I4 < 32); + static_assert!(I5 < 32); + static_assert!(I6 < 32); + static_assert!(I7 < 32); + static_assert!(I8 < 32); + static_assert!(I9 < 32); + static_assert!(I10 < 32); + static_assert!(I11 < 32); + static_assert!(I12 < 32); + static_assert!(I13 < 32); + static_assert!(I14 < 32); + static_assert!(I15 < 32); let shuf: simd::u8x16 = unsafe { simd_shuffle!( a.as_u8x16(), @@ -1007,14 +1007,14 @@ pub fn i16x8_shuffle< a: v128, b: v128, ) -> v128 { - static_assert!(I0: 
usize where I0 < 16); - static_assert!(I1: usize where I1 < 16); - static_assert!(I2: usize where I2 < 16); - static_assert!(I3: usize where I3 < 16); - static_assert!(I4: usize where I4 < 16); - static_assert!(I5: usize where I5 < 16); - static_assert!(I6: usize where I6 < 16); - static_assert!(I7: usize where I7 < 16); + static_assert!(I0 < 16); + static_assert!(I1 < 16); + static_assert!(I2 < 16); + static_assert!(I3 < 16); + static_assert!(I4 < 16); + static_assert!(I5 < 16); + static_assert!(I6 < 16); + static_assert!(I7 < 16); let shuf: simd::u16x8 = unsafe { simd_shuffle!( a.as_u16x8(), @@ -1047,10 +1047,10 @@ pub fn i32x4_shuffle v128 { - static_assert!(I0: usize where I0 < 8); - static_assert!(I1: usize where I1 < 8); - static_assert!(I2: usize where I2 < 8); - static_assert!(I3: usize where I3 < 8); + static_assert!(I0 < 8); + static_assert!(I1 < 8); + static_assert!(I2 < 8); + static_assert!(I3 < 8); let shuf: simd::u32x4 = unsafe { simd_shuffle!( a.as_u32x4(), @@ -1077,8 +1077,8 @@ pub use i32x4_shuffle as u32x4_shuffle; #[doc(alias("i8x16.shuffle"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_shuffle(a: v128, b: v128) -> v128 { - static_assert!(I0: usize where I0 < 4); - static_assert!(I1: usize where I1 < 4); + static_assert!(I0 < 4); + static_assert!(I1 < 4); let shuf: simd::u64x2 = unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) }; shuf.v128() @@ -1097,7 +1097,7 @@ pub use i64x2_shuffle as u64x2_shuffle; #[doc(alias("i8x16.extract_lane_s"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_extract_lane(a: v128) -> i8 { - static_assert!(N: usize where N < 16); + static_assert!(N < 16); unsafe { simd_extract(a.as_i8x16(), N as u32) } } @@ -1111,7 +1111,7 @@ pub fn i8x16_extract_lane(a: v128) -> i8 { #[doc(alias("i8x16.extract_lane_u"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_extract_lane(a: v128) -> u8 { - static_assert!(N: usize where N < 16); + static_assert!(N < 
16); unsafe { simd_extract(a.as_u8x16(), N as u32) } } @@ -1125,7 +1125,7 @@ pub fn u8x16_extract_lane(a: v128) -> u8 { #[doc(alias("i8x16.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_replace_lane(a: v128, val: i8) -> v128 { - static_assert!(N: usize where N < 16); + static_assert!(N < 16); unsafe { simd_insert(a.as_i8x16(), N as u32, val).v128() } } @@ -1139,7 +1139,7 @@ pub fn i8x16_replace_lane(a: v128, val: i8) -> v128 { #[doc(alias("i8x16.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_replace_lane(a: v128, val: u8) -> v128 { - static_assert!(N: usize where N < 16); + static_assert!(N < 16); unsafe { simd_insert(a.as_u8x16(), N as u32, val).v128() } } @@ -1153,7 +1153,7 @@ pub fn u8x16_replace_lane(a: v128, val: u8) -> v128 { #[doc(alias("i16x8.extract_lane_s"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_extract_lane(a: v128) -> i16 { - static_assert!(N: usize where N < 8); + static_assert!(N < 8); unsafe { simd_extract(a.as_i16x8(), N as u32) } } @@ -1167,7 +1167,7 @@ pub fn i16x8_extract_lane(a: v128) -> i16 { #[doc(alias("i16x8.extract_lane_u"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_extract_lane(a: v128) -> u16 { - static_assert!(N: usize where N < 8); + static_assert!(N < 8); unsafe { simd_extract(a.as_u16x8(), N as u32) } } @@ -1181,7 +1181,7 @@ pub fn u16x8_extract_lane(a: v128) -> u16 { #[doc(alias("i16x8.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_replace_lane(a: v128, val: i16) -> v128 { - static_assert!(N: usize where N < 8); + static_assert!(N < 8); unsafe { simd_insert(a.as_i16x8(), N as u32, val).v128() } } @@ -1195,7 +1195,7 @@ pub fn i16x8_replace_lane(a: v128, val: i16) -> v128 { #[doc(alias("i16x8.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_replace_lane(a: v128, val: u16) -> v128 { - static_assert!(N: usize where N < 8); + static_assert!(N < 8); unsafe { 
simd_insert(a.as_u16x8(), N as u32, val).v128() } } @@ -1209,7 +1209,7 @@ pub fn u16x8_replace_lane(a: v128, val: u16) -> v128 { #[doc(alias("i32x4.extract_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_extract_lane(a: v128) -> i32 { - static_assert!(N: usize where N < 4); + static_assert!(N < 4); unsafe { simd_extract(a.as_i32x4(), N as u32) } } @@ -1235,7 +1235,7 @@ pub fn u32x4_extract_lane(a: v128) -> u32 { #[doc(alias("i32x4.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_replace_lane(a: v128, val: i32) -> v128 { - static_assert!(N: usize where N < 4); + static_assert!(N < 4); unsafe { simd_insert(a.as_i32x4(), N as u32, val).v128() } } @@ -1261,7 +1261,7 @@ pub fn u32x4_replace_lane(a: v128, val: u32) -> v128 { #[doc(alias("i64x2.extract_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_extract_lane(a: v128) -> i64 { - static_assert!(N: usize where N < 2); + static_assert!(N < 2); unsafe { simd_extract(a.as_i64x2(), N as u32) } } @@ -1287,7 +1287,7 @@ pub fn u64x2_extract_lane(a: v128) -> u64 { #[doc(alias("i64x2.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_replace_lane(a: v128, val: i64) -> v128 { - static_assert!(N: usize where N < 2); + static_assert!(N < 2); unsafe { simd_insert(a.as_i64x2(), N as u32, val).v128() } } @@ -1313,7 +1313,7 @@ pub fn u64x2_replace_lane(a: v128, val: u64) -> v128 { #[doc(alias("f32x4.extract_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_extract_lane(a: v128) -> f32 { - static_assert!(N: usize where N < 4); + static_assert!(N < 4); unsafe { simd_extract(a.as_f32x4(), N as u32) } } @@ -1327,7 +1327,7 @@ pub fn f32x4_extract_lane(a: v128) -> f32 { #[doc(alias("f32x4.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_replace_lane(a: v128, val: f32) -> v128 { - static_assert!(N: usize where N < 4); + static_assert!(N < 4); unsafe { simd_insert(a.as_f32x4(), N 
as u32, val).v128() } } @@ -1341,7 +1341,7 @@ pub fn f32x4_replace_lane(a: v128, val: f32) -> v128 { #[doc(alias("f64x2.extract_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_extract_lane(a: v128) -> f64 { - static_assert!(N: usize where N < 2); + static_assert!(N < 2); unsafe { simd_extract(a.as_f64x2(), N as u32) } } @@ -1355,7 +1355,7 @@ pub fn f64x2_extract_lane(a: v128) -> f64 { #[doc(alias("f64x2.replace_lane"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_replace_lane(a: v128, val: f64) -> v128 { - static_assert!(N: usize where N < 2); + static_assert!(N < 2); unsafe { simd_insert(a.as_f64x2(), N as u32, val).v128() } } diff --git a/library/stdarch/crates/core_arch/src/x86/aes.rs b/library/stdarch/crates/core_arch/src/x86/aes.rs index c8936a7d27ec..0346c8e05b56 100644 --- a/library/stdarch/crates/core_arch/src/x86/aes.rs +++ b/library/stdarch/crates/core_arch/src/x86/aes.rs @@ -96,7 +96,7 @@ pub unsafe fn _mm_aesimc_si128(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_aeskeygenassist_si128(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); aeskeygenassist(a, IMM8 as u8) } diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs index 86b28ef6810c..a77005c0ea12 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx.rs @@ -117,7 +117,7 @@ pub unsafe fn _mm256_or_ps(a: __m256, b: __m256) -> __m256 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_shuffle_pd(a: __m256d, b: __m256d) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, b, @@ -140,7 +140,7 @@ pub unsafe fn _mm256_shuffle_pd(a: __m256d, b: __m256d) -> __m2 #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = 
"1.27.0")] pub unsafe fn _mm256_shuffle_ps(a: __m256, b: __m256) -> __m256 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, b, @@ -350,7 +350,7 @@ pub unsafe fn _mm256_div_pd(a: __m256d, b: __m256d) -> __m256d { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_round_pd(a: __m256d) -> __m256d { - static_assert_imm4!(ROUNDING); + static_assert_uimm_bits!(ROUNDING, 4); roundpd256(a, ROUNDING) } @@ -397,7 +397,7 @@ pub unsafe fn _mm256_floor_pd(a: __m256d) -> __m256d { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_round_ps(a: __m256) -> __m256 { - static_assert_imm4!(ROUNDING); + static_assert_uimm_bits!(ROUNDING, 4); roundps256(a, ROUNDING) } @@ -462,7 +462,7 @@ pub unsafe fn _mm256_sqrt_pd(a: __m256d) -> __m256d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_blend_pd(a: __m256d, b: __m256d) -> __m256d { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); simd_shuffle!( a, b, @@ -485,7 +485,7 @@ pub unsafe fn _mm256_blend_pd(a: __m256d, b: __m256d) -> __m256 #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_blend_ps(a: __m256, b: __m256) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); simd_shuffle!( a, b, @@ -538,7 +538,7 @@ pub unsafe fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_dp_ps(a: __m256, b: __m256) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); vdpps(a, b, IMM8) } @@ -737,7 +737,7 @@ pub const _CMP_TRUE_US: i32 = 0x1f; #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmp_pd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm5!(IMM5); + 
static_assert_uimm_bits!(IMM5, 5); vcmppd(a, b, IMM5 as i8) } @@ -752,7 +752,7 @@ pub unsafe fn _mm_cmp_pd(a: __m128d, b: __m128d) -> __m128d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cmp_pd(a: __m256d, b: __m256d) -> __m256d { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); vcmppd256(a, b, IMM5 as u8) } @@ -767,7 +767,7 @@ pub unsafe fn _mm256_cmp_pd(a: __m256d, b: __m256d) -> __m256d #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmp_ps(a: __m128, b: __m128) -> __m128 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); vcmpps(a, b, IMM5 as i8) } @@ -782,7 +782,7 @@ pub unsafe fn _mm_cmp_ps(a: __m128, b: __m128) -> __m128 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cmp_ps(a: __m256, b: __m256) -> __m256 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); vcmpps256(a, b, IMM5 as u8) } @@ -799,7 +799,7 @@ pub unsafe fn _mm256_cmp_ps(a: __m256, b: __m256) -> __m256 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmp_sd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); vcmpsd(a, b, IMM5 as i8) } @@ -816,7 +816,7 @@ pub unsafe fn _mm_cmp_sd(a: __m128d, b: __m128d) -> __m128d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmp_ss(a: __m128, b: __m128) -> __m128 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); vcmpss(a, b, IMM5 as i8) } @@ -929,7 +929,7 @@ pub unsafe fn _mm256_cvttps_epi32(a: __m256) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extractf128_ps(a: __m256) -> __m128 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); simd_shuffle!( a, _mm256_undefined_ps(), @@ 
-950,7 +950,7 @@ pub unsafe fn _mm256_extractf128_ps(a: __m256) -> __m128 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extractf128_pd(a: __m256d) -> __m128d { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); simd_shuffle!(a, _mm256_undefined_pd(), [[0, 1], [2, 3]][IMM1 as usize]) } @@ -966,7 +966,7 @@ pub unsafe fn _mm256_extractf128_pd(a: __m256d) -> __m128d { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extractf128_si256(a: __m256i) -> __m128i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let dst: i64x2 = simd_shuffle!( a.as_i64x4(), _mm256_undefined_si256().as_i64x4(), @@ -1032,7 +1032,7 @@ pub unsafe fn _mm_permutevar_ps(a: __m128, b: __m128i) -> __m128 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute_ps(a: __m256) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); simd_shuffle!( a, _mm256_undefined_ps(), @@ -1059,7 +1059,7 @@ pub unsafe fn _mm256_permute_ps(a: __m256) -> __m256 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_permute_ps(a: __m128) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); simd_shuffle!( a, _mm_undefined_ps(), @@ -1106,7 +1106,7 @@ pub unsafe fn _mm_permutevar_pd(a: __m128d, b: __m128i) -> __m128d { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute_pd(a: __m256d) -> __m256d { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); simd_shuffle!( a, _mm256_undefined_pd(), @@ -1129,7 +1129,7 @@ pub unsafe fn _mm256_permute_pd(a: __m256d) -> __m256d { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_permute_pd(a: __m128d) -> __m128d { - static_assert_imm2!(IMM2); + 
static_assert_uimm_bits!(IMM2, 2); simd_shuffle!( a, _mm_undefined_pd(), @@ -1147,7 +1147,7 @@ pub unsafe fn _mm_permute_pd(a: __m128d) -> __m128d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2f128_ps(a: __m256, b: __m256) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); vperm2f128ps256(a, b, IMM8 as i8) } @@ -1161,7 +1161,7 @@ pub unsafe fn _mm256_permute2f128_ps(a: __m256, b: __m256) -> _ #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2f128_pd(a: __m256d, b: __m256d) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); vperm2f128pd256(a, b, IMM8 as i8) } @@ -1175,7 +1175,7 @@ pub unsafe fn _mm256_permute2f128_pd(a: __m256d, b: __m256d) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2f128_si256(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vperm2f128si256(a.as_i32x8(), b.as_i32x8(), IMM8 as i8)) } @@ -1256,7 +1256,7 @@ pub unsafe fn _mm256_broadcast_pd(a: &__m128d) -> __m256d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insertf128_ps(a: __m256, b: __m128) -> __m256 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); simd_shuffle!( a, _mm256_castps128_ps256(b), @@ -1278,7 +1278,7 @@ pub unsafe fn _mm256_insertf128_ps(a: __m256, b: __m128) -> __m #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insertf128_pd(a: __m256d, b: __m128d) -> __m256d { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); simd_shuffle!( a, _mm256_castpd128_pd256(b), @@ -1299,7 +1299,7 @@ pub unsafe fn _mm256_insertf128_pd(a: __m256d, b: __m128d) -> _ #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = 
"1.27.0")] pub unsafe fn _mm256_insertf128_si256(a: __m256i, b: __m128i) -> __m256i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let dst: i64x4 = simd_shuffle!( a.as_i64x4(), _mm256_castsi128_si256(b).as_i64x4(), @@ -1318,7 +1318,7 @@ pub unsafe fn _mm256_insertf128_si256(a: __m256i, b: __m128i) - #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi8(a: __m256i, i: i8) -> __m256i { - static_assert_imm5!(INDEX); + static_assert_uimm_bits!(INDEX, 5); transmute(simd_insert(a.as_i8x32(), INDEX as u32, i)) } @@ -1332,7 +1332,7 @@ pub unsafe fn _mm256_insert_epi8(a: __m256i, i: i8) -> __m256i #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi16(a: __m256i, i: i16) -> __m256i { - static_assert_imm4!(INDEX); + static_assert_uimm_bits!(INDEX, 4); transmute(simd_insert(a.as_i16x16(), INDEX as u32, i)) } @@ -1346,7 +1346,7 @@ pub unsafe fn _mm256_insert_epi16(a: __m256i, i: i16) -> __m25 #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi32(a: __m256i, i: i32) -> __m256i { - static_assert_imm3!(INDEX); + static_assert_uimm_bits!(INDEX, 3); transmute(simd_insert(a.as_i32x8(), INDEX as u32, i)) } diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs index 2171185bf90a..5262628e10b5 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx2.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs @@ -157,7 +157,7 @@ pub unsafe fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_alignr_epi8(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); // If palignr is shifting the pair of vectors more than the size of two // lanes, emit zero. 
if IMM8 > 32 { @@ -367,7 +367,7 @@ pub unsafe fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_blend_epi32(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); let a = a.as_i32x4(); let b = b.as_i32x4(); let r: i32x4 = simd_shuffle!( @@ -392,7 +392,7 @@ pub unsafe fn _mm_blend_epi32(a: __m128i, b: __m128i) -> __m128 #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_blend_epi32(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let b = b.as_i32x8(); let r: i32x8 = simd_shuffle!( @@ -421,7 +421,7 @@ pub unsafe fn _mm256_blend_epi32(a: __m256i, b: __m256i) -> __m #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_blend_epi16(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x16(); let b = b.as_i16x16(); @@ -888,7 +888,7 @@ pub unsafe fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extracti128_si256(a: __m256i) -> __m128i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let a = a.as_i64x4(); let b = _mm256_undefined_si256().as_i64x4(); let dst: i64x2 = simd_shuffle!(a, b, [[0, 1], [2, 3]][IMM1 as usize]); @@ -1710,7 +1710,7 @@ pub unsafe fn _mm256_mask_i64gather_pd( #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_inserti128_si256(a: __m256i, b: __m128i) -> __m256i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let a = a.as_i64x4(); let b = _mm256_castsi128_si256(b).as_i64x4(); let dst: i64x4 = simd_shuffle!(a, b, [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize]); @@ 
-2045,7 +2045,7 @@ pub unsafe fn _mm256_movemask_epi8(a: __m256i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_mpsadbw_epu8(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(mpsadbw(a.as_u8x32(), b.as_u8x32(), IMM8)) } @@ -2226,7 +2226,7 @@ pub unsafe fn _mm256_permutevar8x32_epi32(a: __m256i, b: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute4x64_epi64(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let zero = _mm256_setzero_si256().as_i64x4(); let r: i64x4 = simd_shuffle!( a.as_i64x4(), @@ -2250,7 +2250,7 @@ pub unsafe fn _mm256_permute4x64_epi64(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2x128_si256(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vperm2i128(a.as_i64x4(), b.as_i64x4(), IMM8 as i8)) } @@ -2264,7 +2264,7 @@ pub unsafe fn _mm256_permute2x128_si256(a: __m256i, b: __m256i) #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute4x64_pd(a: __m256d) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); simd_shuffle!( a, _mm256_undefined_pd(), @@ -2377,7 +2377,7 @@ pub unsafe fn _mm256_shuffle_epi8(a: __m256i, b: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_shuffle_epi32(a: __m256i) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r: i32x8 = simd_shuffle!( a.as_i32x8(), a.as_i32x8(), @@ -2406,7 +2406,7 @@ pub unsafe fn _mm256_shuffle_epi32(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] 
pub unsafe fn _mm256_shufflehi_epi16(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x16(); let r: i16x16 = simd_shuffle!( a, @@ -2444,7 +2444,7 @@ pub unsafe fn _mm256_shufflehi_epi16(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_shufflelo_epi16(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x16(); let r: i16x16 = simd_shuffle!( a, @@ -2556,7 +2556,7 @@ pub unsafe fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_slli_epi16(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pslliw(a.as_i16x16(), IMM8)) } @@ -2570,7 +2570,7 @@ pub unsafe fn _mm256_slli_epi16(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_slli_epi32(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psllid(a.as_i32x8(), IMM8)) } @@ -2584,7 +2584,7 @@ pub unsafe fn _mm256_slli_epi32(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_slli_epi64(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pslliq(a.as_i64x4(), IMM8)) } @@ -2597,7 +2597,7 @@ pub unsafe fn _mm256_slli_epi64(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_slli_si256(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); _mm256_bslli_epi128::(a) } @@ -2610,7 +2610,7 @@ pub unsafe fn _mm256_slli_si256(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = 
"1.27.0")] pub unsafe fn _mm256_bslli_epi128(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); const fn mask(shift: i32, i: u32) -> u32 { let shift = shift as u32 & 0xff; if shift > 15 || i % 16 < shift { @@ -2748,7 +2748,7 @@ pub unsafe fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_srai_epi16(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psraiw(a.as_i16x16(), IMM8)) } @@ -2762,7 +2762,7 @@ pub unsafe fn _mm256_srai_epi16(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_srai_epi32(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psraid(a.as_i32x8(), IMM8)) } @@ -2799,7 +2799,7 @@ pub unsafe fn _mm256_srav_epi32(a: __m256i, count: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_srli_si256(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); _mm256_bsrli_epi128::(a) } @@ -2812,7 +2812,7 @@ pub unsafe fn _mm256_srli_si256(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_bsrli_epi128(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i8x32(); let zero = _mm256_setzero_si256().as_i8x32(); let r: i8x32 = match IMM8 % 16 { @@ -2995,7 +2995,7 @@ pub unsafe fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_srli_epi16(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psrliw(a.as_i16x16(), IMM8)) } @@ -3009,7 +3009,7 @@ pub 
unsafe fn _mm256_srli_epi16(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_srli_epi32(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psrlid(a.as_i32x8(), IMM8)) } @@ -3023,7 +3023,7 @@ pub unsafe fn _mm256_srli_epi32(a: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_srli_epi64(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psrliq(a.as_i64x4(), IMM8)) } @@ -3543,7 +3543,7 @@ pub unsafe fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extract_epi8(a: __m256i) -> i32 { - static_assert_imm5!(INDEX); + static_assert_uimm_bits!(INDEX, 5); simd_extract::<_, u8>(a.as_u8x32(), INDEX as u32) as i32 } @@ -3559,7 +3559,7 @@ pub unsafe fn _mm256_extract_epi8(a: __m256i) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extract_epi16(a: __m256i) -> i32 { - static_assert_imm4!(INDEX); + static_assert_uimm_bits!(INDEX, 4); simd_extract::<_, u16>(a.as_u16x16(), INDEX as u32) as i32 } @@ -3572,7 +3572,7 @@ pub unsafe fn _mm256_extract_epi16(a: __m256i) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extract_epi32(a: __m256i) -> i32 { - static_assert_imm3!(INDEX); + static_assert_uimm_bits!(INDEX, 3); simd_extract(a.as_i32x8(), INDEX as u32) } diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs index 33103f16ec6a..0ef919617a49 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs @@ -3700,7 +3700,7 @@ pub unsafe fn 
_mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) - #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x32(); let b = b.as_u16x32(); let r = vpcmpuw(a, b, IMM8, 0b11111111_11111111_11111111_11111111); @@ -3719,7 +3719,7 @@ pub unsafe fn _mm512_mask_cmp_epu16_mask( a: __m512i, b: __m512i, ) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x32(); let b = b.as_u16x32(); let r = vpcmpuw(a, b, IMM8, k1); @@ -3734,7 +3734,7 @@ pub unsafe fn _mm512_mask_cmp_epu16_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x16(); let b = b.as_u16x16(); let r = vpcmpuw256(a, b, IMM8, 0b11111111_11111111); @@ -3753,7 +3753,7 @@ pub unsafe fn _mm256_mask_cmp_epu16_mask( a: __m256i, b: __m256i, ) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x16(); let b = b.as_u16x16(); let r = vpcmpuw256(a, b, IMM8, k1); @@ -3768,7 +3768,7 @@ pub unsafe fn _mm256_mask_cmp_epu16_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x8(); let b = b.as_u16x8(); let r = vpcmpuw128(a, b, IMM8, 0b11111111); @@ -3787,7 +3787,7 @@ pub unsafe fn _mm_mask_cmp_epu16_mask( a: __m128i, b: __m128i, ) -> __mmask8 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x8(); let b = b.as_u16x8(); let r = vpcmpuw128(a, b, IMM8, k1); @@ -3802,7 +3802,7 @@ pub unsafe fn _mm_mask_cmp_epu16_mask( 
#[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x64(); let b = b.as_u8x64(); let r = vpcmpub( @@ -3826,7 +3826,7 @@ pub unsafe fn _mm512_mask_cmp_epu8_mask( a: __m512i, b: __m512i, ) -> __mmask64 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x64(); let b = b.as_u8x64(); let r = vpcmpub(a, b, IMM8, k1); @@ -3841,7 +3841,7 @@ pub unsafe fn _mm512_mask_cmp_epu8_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x32(); let b = b.as_u8x32(); let r = vpcmpub256(a, b, IMM8, 0b11111111_11111111_11111111_11111111); @@ -3860,7 +3860,7 @@ pub unsafe fn _mm256_mask_cmp_epu8_mask( a: __m256i, b: __m256i, ) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x32(); let b = b.as_u8x32(); let r = vpcmpub256(a, b, IMM8, k1); @@ -3875,7 +3875,7 @@ pub unsafe fn _mm256_mask_cmp_epu8_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x16(); let b = b.as_u8x16(); let r = vpcmpub128(a, b, IMM8, 0b11111111_11111111); @@ -3894,7 +3894,7 @@ pub unsafe fn _mm_mask_cmp_epu8_mask( a: __m128i, b: __m128i, ) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x16(); let b = b.as_u8x16(); let r = vpcmpub128(a, b, IMM8, k1); @@ -3909,7 +3909,7 @@ pub unsafe fn _mm_mask_cmp_epu8_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epi16_mask(a: __m512i, 
b: __m512i) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x32(); let b = b.as_i16x32(); let r = vpcmpw(a, b, IMM8, 0b11111111_11111111_11111111_11111111); @@ -3928,7 +3928,7 @@ pub unsafe fn _mm512_mask_cmp_epi16_mask( a: __m512i, b: __m512i, ) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x32(); let b = b.as_i16x32(); let r = vpcmpw(a, b, IMM8, k1); @@ -3943,7 +3943,7 @@ pub unsafe fn _mm512_mask_cmp_epi16_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x16(); let b = b.as_i16x16(); let r = vpcmpw256(a, b, IMM8, 0b11111111_11111111); @@ -3962,7 +3962,7 @@ pub unsafe fn _mm256_mask_cmp_epi16_mask( a: __m256i, b: __m256i, ) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x16(); let b = b.as_i16x16(); let r = vpcmpw256(a, b, IMM8, k1); @@ -3977,7 +3977,7 @@ pub unsafe fn _mm256_mask_cmp_epi16_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x8(); let b = b.as_i16x8(); let r = vpcmpw128(a, b, IMM8, 0b11111111); @@ -3996,7 +3996,7 @@ pub unsafe fn _mm_mask_cmp_epi16_mask( a: __m128i, b: __m128i, ) -> __mmask8 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x8(); let b = b.as_i16x8(); let r = vpcmpw128(a, b, IMM8, k1); @@ -4011,7 +4011,7 @@ pub unsafe fn _mm_mask_cmp_epi16_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a 
= a.as_i8x64(); let b = b.as_i8x64(); let r = vpcmpb( @@ -4035,7 +4035,7 @@ pub unsafe fn _mm512_mask_cmp_epi8_mask( a: __m512i, b: __m512i, ) -> __mmask64 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x64(); let b = b.as_i8x64(); let r = vpcmpb(a, b, IMM8, k1); @@ -4050,7 +4050,7 @@ pub unsafe fn _mm512_mask_cmp_epi8_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x32(); let b = b.as_i8x32(); let r = vpcmpb256(a, b, IMM8, 0b11111111_11111111_11111111_11111111); @@ -4069,7 +4069,7 @@ pub unsafe fn _mm256_mask_cmp_epi8_mask( a: __m256i, b: __m256i, ) -> __mmask32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x32(); let b = b.as_i8x32(); let r = vpcmpb256(a, b, IMM8, k1); @@ -4084,7 +4084,7 @@ pub unsafe fn _mm256_mask_cmp_epi8_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x16(); let b = b.as_i8x16(); let r = vpcmpb128(a, b, IMM8, 0b11111111_11111111); @@ -4103,7 +4103,7 @@ pub unsafe fn _mm_mask_cmp_epi8_mask( a: __m128i, b: __m128i, ) -> __mmask16 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x16(); let b = b.as_i8x16(); let r = vpcmpb128(a, b, IMM8, k1); @@ -5338,7 +5338,7 @@ pub unsafe fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __ #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_slli_epi16(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let r = vpslliw(a, IMM8); transmute(r) @@ -5356,7 +5356,7 @@ pub unsafe fn 
_mm512_mask_slli_epi16( k: __mmask32, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let shf = vpslliw(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i16x32())) @@ -5370,7 +5370,7 @@ pub unsafe fn _mm512_mask_slli_epi16( #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_slli_epi16(k: __mmask32, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let shf = vpslliw(a, IMM8); let zero = _mm512_setzero_si512().as_i16x32(); @@ -5389,7 +5389,7 @@ pub unsafe fn _mm256_mask_slli_epi16( k: __mmask16, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliw256(a.as_i16x16(), imm8); transmute(simd_select_bitmask(k, r, src.as_i16x16())) @@ -5403,7 +5403,7 @@ pub unsafe fn _mm256_mask_slli_epi16( #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_slli_epi16(k: __mmask16, a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliw256(a.as_i16x16(), imm8); let zero = _mm256_setzero_si256().as_i16x16(); @@ -5422,7 +5422,7 @@ pub unsafe fn _mm_mask_slli_epi16( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliw128(a.as_i16x8(), imm8); transmute(simd_select_bitmask(k, r, src.as_i16x8())) @@ -5436,7 +5436,7 @@ pub unsafe fn _mm_mask_slli_epi16( #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_slli_epi16(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliw128(a.as_i16x8(), imm8); let zero = _mm_setzero_si128().as_i16x8(); @@ -5654,7 
+5654,7 @@ pub unsafe fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __ #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srli_epi16(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let r = vpsrliw(a, IMM8); transmute(r) @@ -5672,7 +5672,7 @@ pub unsafe fn _mm512_mask_srli_epi16( k: __mmask32, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let shf = vpsrliw(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i16x32())) @@ -5686,7 +5686,7 @@ pub unsafe fn _mm512_mask_srli_epi16( #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srli_epi16(k: __mmask32, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); //imm8 should be u32, it seems the document to verify is incorrect let a = a.as_i16x32(); let shf = vpsrliw(a, IMM8 as u32); @@ -5706,7 +5706,7 @@ pub unsafe fn _mm256_mask_srli_epi16( k: __mmask16, a: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_srli_epi16::(a); transmute(simd_select_bitmask(k, shf.as_i16x16(), src.as_i16x16())) } @@ -5719,7 +5719,7 @@ pub unsafe fn _mm256_mask_srli_epi16( #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srli_epi16(k: __mmask16, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_srli_epi16::(a); let zero = _mm256_setzero_si256().as_i16x16(); transmute(simd_select_bitmask(k, shf.as_i16x16(), zero)) @@ -5737,7 +5737,7 @@ pub unsafe fn _mm_mask_srli_epi16( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf = _mm_srli_epi16::(a); transmute(simd_select_bitmask(k, shf.as_i16x8(), 
src.as_i16x8())) } @@ -5750,7 +5750,7 @@ pub unsafe fn _mm_mask_srli_epi16( #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srli_epi16(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf = _mm_srli_epi16::(a); let zero = _mm_setzero_si128().as_i16x8(); transmute(simd_select_bitmask(k, shf.as_i16x8(), zero)) @@ -5967,7 +5967,7 @@ pub unsafe fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __ #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srai_epi16(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let r = vpsraiw(a, IMM8); transmute(r) @@ -5985,7 +5985,7 @@ pub unsafe fn _mm512_mask_srai_epi16( k: __mmask32, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let shf = vpsraiw(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i16x32())) @@ -5999,7 +5999,7 @@ pub unsafe fn _mm512_mask_srai_epi16( #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srai_epi16(k: __mmask32, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let shf = vpsraiw(a, IMM8); let zero = _mm512_setzero_si512().as_i16x32(); @@ -6018,7 +6018,7 @@ pub unsafe fn _mm256_mask_srai_epi16( k: __mmask16, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psraiw256(a.as_i16x16(), imm8); transmute(simd_select_bitmask(k, r, src.as_i16x16())) @@ -6032,7 +6032,7 @@ pub unsafe fn _mm256_mask_srai_epi16( #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srai_epi16(k: __mmask16, a: __m256i) -> __m256i { - 
static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psraiw256(a.as_i16x16(), imm8); let zero = _mm256_setzero_si256().as_i16x16(); @@ -6051,7 +6051,7 @@ pub unsafe fn _mm_mask_srai_epi16( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psraiw128(a.as_i16x8(), imm8); transmute(simd_select_bitmask(k, r, src.as_i16x8())) @@ -6065,7 +6065,7 @@ pub unsafe fn _mm_mask_srai_epi16( #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srai_epi16(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psraiw128(a.as_i16x8(), imm8); let zero = _mm_setzero_si128().as_i16x8(); @@ -7458,7 +7458,7 @@ pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let r: i16x32 = simd_shuffle!( a, @@ -7513,7 +7513,7 @@ pub unsafe fn _mm512_mask_shufflelo_epi16( k: __mmask32, a: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_shufflelo_epi16::(a); transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32())) } @@ -7526,7 +7526,7 @@ pub unsafe fn _mm512_mask_shufflelo_epi16( #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_shufflelo_epi16::(a); let zero = _mm512_setzero_si512().as_i16x32(); transmute(simd_select_bitmask(k, r.as_i16x32(), zero)) @@ -7544,7 +7544,7 @@ pub unsafe fn _mm256_mask_shufflelo_epi16( k: 
__mmask16, a: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm256_shufflelo_epi16::(a); transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16())) } @@ -7557,7 +7557,7 @@ pub unsafe fn _mm256_mask_shufflelo_epi16( #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm256_shufflelo_epi16::(a); let zero = _mm256_setzero_si256().as_i16x16(); transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero)) @@ -7575,7 +7575,7 @@ pub unsafe fn _mm_mask_shufflelo_epi16( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflelo_epi16::(a); transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8())) } @@ -7588,7 +7588,7 @@ pub unsafe fn _mm_mask_shufflelo_epi16( #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflelo_epi16::(a); let zero = _mm_setzero_si128().as_i16x8(); transmute(simd_select_bitmask(k, shuffle.as_i16x8(), zero)) @@ -7602,7 +7602,7 @@ pub unsafe fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); let r: i16x32 = simd_shuffle!( a, @@ -7657,7 +7657,7 @@ pub unsafe fn _mm512_mask_shufflehi_epi16( k: __mmask32, a: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_shufflehi_epi16::(a); transmute(simd_select_bitmask(k, r.as_i16x32(), 
src.as_i16x32())) } @@ -7670,7 +7670,7 @@ pub unsafe fn _mm512_mask_shufflehi_epi16( #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_shufflehi_epi16::(a); let zero = _mm512_setzero_si512().as_i16x32(); transmute(simd_select_bitmask(k, r.as_i16x32(), zero)) @@ -7688,7 +7688,7 @@ pub unsafe fn _mm256_mask_shufflehi_epi16( k: __mmask16, a: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm256_shufflehi_epi16::(a); transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16())) } @@ -7701,7 +7701,7 @@ pub unsafe fn _mm256_mask_shufflehi_epi16( #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm256_shufflehi_epi16::(a); let zero = _mm256_setzero_si256().as_i16x16(); transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero)) @@ -7719,7 +7719,7 @@ pub unsafe fn _mm_mask_shufflehi_epi16( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflehi_epi16::(a); transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8())) } @@ -7732,7 +7732,7 @@ pub unsafe fn _mm_mask_shufflehi_epi16( #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_shufflehi_epi16(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflehi_epi16::(a); let zero = _mm_setzero_si128().as_i16x8(); transmute(simd_select_bitmask(k, shuffle.as_i16x8(), zero)) @@ -8173,7 +8173,7 @@ pub unsafe fn _mm512_sad_epu8(a: __m512i, b: __m512i) -> __m512i 
{ #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm512_dbsad_epu8(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x64(); let b = b.as_u8x64(); let r = vdbpsadbw(a, b, IMM8); @@ -8193,7 +8193,7 @@ pub unsafe fn _mm512_mask_dbsad_epu8( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x64(); let b = b.as_u8x64(); let r = vdbpsadbw(a, b, IMM8); @@ -8212,7 +8212,7 @@ pub unsafe fn _mm512_maskz_dbsad_epu8( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x64(); let b = b.as_u8x64(); let r = vdbpsadbw(a, b, IMM8); @@ -8231,7 +8231,7 @@ pub unsafe fn _mm512_maskz_dbsad_epu8( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm256_dbsad_epu8(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x32(); let b = b.as_u8x32(); let r = vdbpsadbw256(a, b, IMM8); @@ -8251,7 +8251,7 @@ pub unsafe fn _mm256_mask_dbsad_epu8( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x32(); let b = b.as_u8x32(); let r = vdbpsadbw256(a, b, IMM8); @@ -8270,7 +8270,7 @@ pub unsafe fn _mm256_maskz_dbsad_epu8( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x32(); let b = b.as_u8x32(); let r = vdbpsadbw256(a, b, IMM8); @@ -8289,7 +8289,7 @@ pub unsafe fn _mm256_maskz_dbsad_epu8( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm_dbsad_epu8(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x16(); let b = b.as_u8x16(); let r = vdbpsadbw128(a, b, IMM8); @@ -8309,7 
+8309,7 @@ pub unsafe fn _mm_mask_dbsad_epu8( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x16(); let b = b.as_u8x16(); let r = vdbpsadbw128(a, b, IMM8); @@ -8328,7 +8328,7 @@ pub unsafe fn _mm_maskz_dbsad_epu8( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_u8x16(); let b = b.as_u8x16(); let r = vdbpsadbw128(a, b, IMM8); @@ -9188,7 +9188,7 @@ pub unsafe fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_bslli_epi128(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); const fn mask(shift: i32, i: u32) -> u32 { let shift = shift as u32 & 0xff; if shift > 15 || i % 16 < shift { @@ -9280,7 +9280,7 @@ pub unsafe fn _mm512_bslli_epi128(a: __m512i) -> __m512i { #[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_bsrli_epi128(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i8x64(); let zero = _mm512_setzero_si512().as_i8x64(); let r: i8x64 = match IMM8 % 16 { @@ -9645,7 +9645,7 @@ pub unsafe fn _mm512_mask_alignr_epi8( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi8::(a, b); transmute(simd_select_bitmask(k, r.as_i8x64(), src.as_i8x64())) } @@ -9662,7 +9662,7 @@ pub unsafe fn _mm512_maskz_alignr_epi8( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi8::(a, b); let zero = _mm512_setzero_si512().as_i8x64(); transmute(simd_select_bitmask(k, r.as_i8x64(), zero)) @@ -9681,7 +9681,7 @@ pub unsafe fn _mm256_mask_alignr_epi8( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + 
static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi8::(a, b); transmute(simd_select_bitmask(k, r.as_i8x32(), src.as_i8x32())) } @@ -9698,7 +9698,7 @@ pub unsafe fn _mm256_maskz_alignr_epi8( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi8::(a, b); transmute(simd_select_bitmask( k, @@ -9720,7 +9720,7 @@ pub unsafe fn _mm_mask_alignr_epi8( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi8::(a, b); transmute(simd_select_bitmask(k, r.as_i8x16(), src.as_i8x16())) } @@ -9737,7 +9737,7 @@ pub unsafe fn _mm_maskz_alignr_epi8( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi8::(a, b); let zero = _mm_setzero_si128().as_i8x16(); transmute(simd_select_bitmask(k, r.as_i8x16(), zero)) diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index 7851871c7b6d..e0014f7edbc0 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -4907,7 +4907,7 @@ pub unsafe fn _mm_maskz_getexp_pd(k: __mmask8, a: __m128d) -> __m128d { #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_roundscale_ps(a: __m512) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); let r = vrndscaleps(a, IMM8, zero, 0b11111111_11111111, _MM_FROUND_CUR_DIRECTION); @@ -4932,7 +4932,7 @@ pub unsafe fn _mm512_mask_roundscale_ps( k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x16(); let src = src.as_f32x16(); let r = vrndscaleps(a, IMM8, src, k, _MM_FROUND_CUR_DIRECTION); @@ -4953,7 +4953,7 @@ pub unsafe fn 
_mm512_mask_roundscale_ps( #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_roundscale_ps(k: __mmask16, a: __m512) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); let r = vrndscaleps(a, IMM8, zero, k, _MM_FROUND_CUR_DIRECTION); @@ -4974,7 +4974,7 @@ pub unsafe fn _mm512_maskz_roundscale_ps(k: __mmask16, a: __m51 #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_roundscale_ps(a: __m256) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let zero = _mm256_setzero_ps().as_f32x8(); let r = vrndscaleps256(a, IMM8, zero, 0b11111111); @@ -4999,7 +4999,7 @@ pub unsafe fn _mm256_mask_roundscale_ps( k: __mmask8, a: __m256, ) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let src = src.as_f32x8(); let r = vrndscaleps256(a, IMM8, src, k); @@ -5020,7 +5020,7 @@ pub unsafe fn _mm256_mask_roundscale_ps( #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_roundscale_ps(k: __mmask8, a: __m256) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let zero = _mm256_setzero_ps().as_f32x8(); let r = vrndscaleps256(a, IMM8, zero, k); @@ -5041,7 +5041,7 @@ pub unsafe fn _mm256_maskz_roundscale_ps(k: __mmask8, a: __m256 #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_roundscale_ps(a: __m128) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); let r = vrndscaleps128(a, IMM8, zero, 0b00001111); @@ -5066,7 +5066,7 @@ pub unsafe fn _mm_mask_roundscale_ps( k: __mmask8, a: __m128, ) -> __m128 { - 
static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let src = src.as_f32x4(); let r = vrndscaleps128(a, IMM8, src, k); @@ -5087,7 +5087,7 @@ pub unsafe fn _mm_mask_roundscale_ps( #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_roundscale_ps(k: __mmask8, a: __m128) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); let r = vrndscaleps128(a, IMM8, zero, k); @@ -5108,7 +5108,7 @@ pub unsafe fn _mm_maskz_roundscale_ps(k: __mmask8, a: __m128) - #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_roundscale_pd(a: __m512d) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); let r = vrndscalepd(a, IMM8, zero, 0b11111111, _MM_FROUND_CUR_DIRECTION); @@ -5133,7 +5133,7 @@ pub unsafe fn _mm512_mask_roundscale_pd( k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x8(); let src = src.as_f64x8(); let r = vrndscalepd(a, IMM8, src, k, _MM_FROUND_CUR_DIRECTION); @@ -5154,7 +5154,7 @@ pub unsafe fn _mm512_mask_roundscale_pd( #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512d) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); let r = vrndscalepd(a, IMM8, zero, k, _MM_FROUND_CUR_DIRECTION); @@ -5175,7 +5175,7 @@ pub unsafe fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512 #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_roundscale_pd(a: __m256d) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let 
a = a.as_f64x4(); let zero = _mm256_setzero_pd().as_f64x4(); let r = vrndscalepd256(a, IMM8, zero, 0b00001111); @@ -5200,7 +5200,7 @@ pub unsafe fn _mm256_mask_roundscale_pd( k: __mmask8, a: __m256d, ) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x4(); let src = src.as_f64x4(); let r = vrndscalepd256(a, IMM8, src, k); @@ -5221,7 +5221,7 @@ pub unsafe fn _mm256_mask_roundscale_pd( #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256d) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x4(); let zero = _mm256_setzero_pd().as_f64x4(); let r = vrndscalepd256(a, IMM8, zero, k); @@ -5242,7 +5242,7 @@ pub unsafe fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256 #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_roundscale_pd(a: __m128d) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); let r = vrndscalepd128(a, IMM8, zero, 0b00000011); @@ -5267,7 +5267,7 @@ pub unsafe fn _mm_mask_roundscale_pd( k: __mmask8, a: __m128d, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let src = src.as_f64x2(); let r = vrndscalepd128(a, IMM8, src, k); @@ -5288,7 +5288,7 @@ pub unsafe fn _mm_mask_roundscale_pd( #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_roundscale_pd(k: __mmask8, a: __m128d) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); let r = vrndscalepd128(a, IMM8, zero, k); @@ -5559,7 +5559,7 @@ pub unsafe fn _mm_maskz_scalef_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128 #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] 
#[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fixupimm_ps(a: __m512, b: __m512, c: __m512i) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x16(); let b = b.as_f32x16(); let c = c.as_i32x16(); @@ -5580,7 +5580,7 @@ pub unsafe fn _mm512_mask_fixupimm_ps( b: __m512, c: __m512i, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x16(); let b = b.as_f32x16(); let c = c.as_i32x16(); @@ -5601,7 +5601,7 @@ pub unsafe fn _mm512_maskz_fixupimm_ps( b: __m512, c: __m512i, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x16(); let b = b.as_f32x16(); let c = c.as_i32x16(); @@ -5617,7 +5617,7 @@ pub unsafe fn _mm512_maskz_fixupimm_ps( #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_fixupimm_ps(a: __m256, b: __m256, c: __m256i) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let b = b.as_f32x8(); let c = c.as_i32x8(); @@ -5638,7 +5638,7 @@ pub unsafe fn _mm256_mask_fixupimm_ps( b: __m256, c: __m256i, ) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let b = b.as_f32x8(); let c = c.as_i32x8(); @@ -5659,7 +5659,7 @@ pub unsafe fn _mm256_maskz_fixupimm_ps( b: __m256, c: __m256i, ) -> __m256 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let b = b.as_f32x8(); let c = c.as_i32x8(); @@ -5675,7 +5675,7 @@ pub unsafe fn _mm256_maskz_fixupimm_ps( #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_ps(a: __m128, b: __m128, c: __m128i) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let c = c.as_i32x4(); @@ -5696,7 +5696,7 @@ pub unsafe fn _mm_mask_fixupimm_ps( b: __m128, c: __m128i, ) -> __m128 { - 
static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let c = c.as_i32x4(); @@ -5717,7 +5717,7 @@ pub unsafe fn _mm_maskz_fixupimm_ps( b: __m128, c: __m128i, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let c = c.as_i32x4(); @@ -5733,7 +5733,7 @@ pub unsafe fn _mm_maskz_fixupimm_ps( #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fixupimm_pd(a: __m512d, b: __m512d, c: __m512i) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x8(); let b = b.as_f64x8(); let c = c.as_i64x8(); @@ -5754,7 +5754,7 @@ pub unsafe fn _mm512_mask_fixupimm_pd( b: __m512d, c: __m512i, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x8(); let b = b.as_f64x8(); let c = c.as_i64x8(); @@ -5775,7 +5775,7 @@ pub unsafe fn _mm512_maskz_fixupimm_pd( b: __m512d, c: __m512i, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x8(); let b = b.as_f64x8(); let c = c.as_i64x8(); @@ -5791,7 +5791,7 @@ pub unsafe fn _mm512_maskz_fixupimm_pd( #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_fixupimm_pd(a: __m256d, b: __m256d, c: __m256i) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x4(); let b = b.as_f64x4(); let c = c.as_i64x4(); @@ -5812,7 +5812,7 @@ pub unsafe fn _mm256_mask_fixupimm_pd( b: __m256d, c: __m256i, ) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x4(); let b = b.as_f64x4(); let c = c.as_i64x4(); @@ -5833,7 +5833,7 @@ pub unsafe fn _mm256_maskz_fixupimm_pd( b: __m256d, c: __m256i, ) -> __m256d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x4(); let b = b.as_f64x4(); let c 
= c.as_i64x4(); @@ -5849,7 +5849,7 @@ pub unsafe fn _mm256_maskz_fixupimm_pd( #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_pd(a: __m128d, b: __m128d, c: __m128i) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let c = c.as_i64x2(); @@ -5870,7 +5870,7 @@ pub unsafe fn _mm_mask_fixupimm_pd( b: __m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let c = c.as_i64x2(); @@ -5891,7 +5891,7 @@ pub unsafe fn _mm_maskz_fixupimm_pd( b: __m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let c = c.as_i64x2(); @@ -5911,7 +5911,7 @@ pub unsafe fn _mm512_ternarylogic_epi32( b: __m512i, c: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let b = b.as_i32x16(); let c = c.as_i32x16(); @@ -5932,7 +5932,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let src = src.as_i32x16(); let a = a.as_i32x16(); let b = b.as_i32x16(); @@ -5953,7 +5953,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi32( b: __m512i, c: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let b = b.as_i32x16(); let c = c.as_i32x16(); @@ -5974,7 +5974,7 @@ pub unsafe fn _mm256_ternarylogic_epi32( b: __m256i, c: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let b = b.as_i32x8(); let c = c.as_i32x8(); @@ -5995,7 +5995,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let src = src.as_i32x8(); let 
a = a.as_i32x8(); let b = b.as_i32x8(); @@ -6016,7 +6016,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi32( b: __m256i, c: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let b = b.as_i32x8(); let c = c.as_i32x8(); @@ -6037,7 +6037,7 @@ pub unsafe fn _mm_ternarylogic_epi32( b: __m128i, c: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let b = b.as_i32x4(); let c = c.as_i32x4(); @@ -6058,7 +6058,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let src = src.as_i32x4(); let a = a.as_i32x4(); let b = b.as_i32x4(); @@ -6079,7 +6079,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi32( b: __m128i, c: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let b = b.as_i32x4(); let c = c.as_i32x4(); @@ -6100,7 +6100,7 @@ pub unsafe fn _mm512_ternarylogic_epi64( b: __m512i, c: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let b = b.as_i64x8(); let c = c.as_i64x8(); @@ -6121,7 +6121,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let src = src.as_i64x8(); let a = a.as_i64x8(); let b = b.as_i64x8(); @@ -6142,7 +6142,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi64( b: __m512i, c: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let b = b.as_i64x8(); let c = c.as_i64x8(); @@ -6163,7 +6163,7 @@ pub unsafe fn _mm256_ternarylogic_epi64( b: __m256i, c: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let b = b.as_i64x4(); let c = c.as_i64x4(); @@ -6184,7 +6184,7 @@ pub unsafe fn 
_mm256_mask_ternarylogic_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let src = src.as_i64x4(); let a = a.as_i64x4(); let b = b.as_i64x4(); @@ -6205,7 +6205,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi64( b: __m256i, c: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let b = b.as_i64x4(); let c = c.as_i64x4(); @@ -6226,7 +6226,7 @@ pub unsafe fn _mm_ternarylogic_epi64( b: __m128i, c: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let b = b.as_i64x2(); let c = c.as_i64x2(); @@ -6247,7 +6247,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi64( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let src = src.as_i64x2(); let a = a.as_i64x2(); let b = b.as_i64x2(); @@ -6268,7 +6268,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi64( b: __m128i, c: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let b = b.as_i64x2(); let c = c.as_i64x2(); @@ -6299,8 +6299,8 @@ pub unsafe fn _mm512_getmant_ps< >( a: __m512, ) -> __m512 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); let r = vgetmantps( @@ -6337,8 +6337,8 @@ pub unsafe fn _mm512_mask_getmant_ps< k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x16(); let src = src.as_f32x16(); let r = vgetmantps(a, SIGN << 2 | NORM, src, k, _MM_FROUND_CUR_DIRECTION); @@ -6368,8 +6368,8 @@ pub unsafe fn _mm512_maskz_getmant_ps< k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + 
static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); let r = vgetmantps(a, SIGN << 2 | NORM, zero, k, _MM_FROUND_CUR_DIRECTION); @@ -6398,8 +6398,8 @@ pub unsafe fn _mm256_getmant_ps< >( a: __m256, ) -> __m256 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x8(); let zero = _mm256_setzero_ps().as_f32x8(); let r = vgetmantps256(a, SIGN << 2 | NORM, zero, 0b11111111); @@ -6430,8 +6430,8 @@ pub unsafe fn _mm256_mask_getmant_ps< k: __mmask8, a: __m256, ) -> __m256 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x8(); let src = src.as_f32x8(); let r = vgetmantps256(a, SIGN << 2 | NORM, src, k); @@ -6461,8 +6461,8 @@ pub unsafe fn _mm256_maskz_getmant_ps< k: __mmask8, a: __m256, ) -> __m256 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x8(); let zero = _mm256_setzero_ps().as_f32x8(); let r = vgetmantps256(a, SIGN << 2 | NORM, zero, k); @@ -6491,8 +6491,8 @@ pub unsafe fn _mm_getmant_ps< >( a: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); let r = vgetmantps128(a, SIGN << 2 | NORM, zero, 0b00001111); @@ -6523,8 +6523,8 @@ pub unsafe fn _mm_mask_getmant_ps< k: __mmask8, a: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x4(); let src = src.as_f32x4(); let r = vgetmantps128(a, SIGN << 2 | NORM, src, k); @@ -6554,8 +6554,8 @@ pub unsafe fn _mm_maskz_getmant_ps< k: __mmask8, a: __m128, ) -> __m128 { - 
static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); let r = vgetmantps128(a, SIGN << 2 | NORM, zero, k); @@ -6584,8 +6584,8 @@ pub unsafe fn _mm512_getmant_pd< >( a: __m512d, ) -> __m512d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); let r = vgetmantpd( @@ -6622,8 +6622,8 @@ pub unsafe fn _mm512_mask_getmant_pd< k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x8(); let src = src.as_f64x8(); let r = vgetmantpd(a, SIGN << 2 | NORM, src, k, _MM_FROUND_CUR_DIRECTION); @@ -6653,8 +6653,8 @@ pub unsafe fn _mm512_maskz_getmant_pd< k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); let r = vgetmantpd(a, SIGN << 2 | NORM, zero, k, _MM_FROUND_CUR_DIRECTION); @@ -6683,8 +6683,8 @@ pub unsafe fn _mm256_getmant_pd< >( a: __m256d, ) -> __m256d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x4(); let zero = _mm256_setzero_pd().as_f64x4(); let r = vgetmantpd256(a, SIGN << 2 | NORM, zero, 0b00001111); @@ -6715,8 +6715,8 @@ pub unsafe fn _mm256_mask_getmant_pd< k: __mmask8, a: __m256d, ) -> __m256d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x4(); let src = src.as_f64x4(); let r = vgetmantpd256(a, SIGN << 2 | NORM, src, k); @@ -6746,8 +6746,8 @@ pub unsafe fn 
_mm256_maskz_getmant_pd< k: __mmask8, a: __m256d, ) -> __m256d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x4(); let zero = _mm256_setzero_pd().as_f64x4(); let r = vgetmantpd256(a, SIGN << 2 | NORM, zero, k); @@ -6776,8 +6776,8 @@ pub unsafe fn _mm_getmant_pd< >( a: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); let r = vgetmantpd128(a, SIGN << 2 | NORM, zero, 0b00000011); @@ -6808,8 +6808,8 @@ pub unsafe fn _mm_mask_getmant_pd< k: __mmask8, a: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x2(); let src = src.as_f64x2(); let r = vgetmantpd128(a, SIGN << 2 | NORM, src, k); @@ -6839,8 +6839,8 @@ pub unsafe fn _mm_maskz_getmant_pd< k: __mmask8, a: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); let r = vgetmantpd128(a, SIGN << 2 | NORM, zero, k); @@ -9319,7 +9319,7 @@ pub unsafe fn _mm512_maskz_getexp_round_pd(k: __mmask8, a: __m51 #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm512_roundscale_round_ps(a: __m512) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); @@ -9346,7 +9346,7 @@ pub unsafe fn _mm512_mask_roundscale_round_ps( k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let src = 
src.as_f32x16(); @@ -9372,7 +9372,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_ps( k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); @@ -9395,7 +9395,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_ps( #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm512_roundscale_round_pd(a: __m512d) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); @@ -9422,7 +9422,7 @@ pub unsafe fn _mm512_mask_roundscale_round_pd( k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let src = src.as_f64x8(); @@ -9448,7 +9448,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_pd( k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); @@ -9625,7 +9625,7 @@ pub unsafe fn _mm512_fixupimm_round_ps( b: __m512, c: __m512i, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let b = b.as_f32x16(); @@ -9648,7 +9648,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_ps( b: __m512, c: __m512i, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let b = b.as_f32x16(); @@ -9671,7 +9671,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_ps( b: __m512, c: __m512i, ) -> __m512 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let b = b.as_f32x16(); @@ -9693,7 +9693,7 
@@ pub unsafe fn _mm512_fixupimm_round_pd( b: __m512d, c: __m512i, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let b = b.as_f64x8(); @@ -9716,7 +9716,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_pd( b: __m512d, c: __m512i, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let b = b.as_f64x8(); @@ -9739,7 +9739,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_pd( b: __m512d, c: __m512i, ) -> __m512d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let b = b.as_f64x8(); @@ -9772,8 +9772,8 @@ pub unsafe fn _mm512_getmant_round_ps< >( a: __m512, ) -> __m512 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); @@ -9807,8 +9807,8 @@ pub unsafe fn _mm512_mask_getmant_round_ps< k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let src = src.as_f32x16(); @@ -9841,8 +9841,8 @@ pub unsafe fn _mm512_maskz_getmant_round_ps< k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); @@ -9874,8 +9874,8 @@ pub unsafe fn _mm512_getmant_round_pd< >( a: __m512d, ) -> __m512d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let 
zero = _mm512_setzero_pd().as_f64x8(); @@ -9909,8 +9909,8 @@ pub unsafe fn _mm512_mask_getmant_round_pd< k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let src = src.as_f64x8(); @@ -9943,8 +9943,8 @@ pub unsafe fn _mm512_maskz_getmant_round_pd< k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); @@ -13941,7 +13941,7 @@ pub unsafe fn _mm256_mask_cvt_roundps_ph( k: __mmask8, a: __m256, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let src = src.as_i16x8(); let r = vcvtps2ph256(a, IMM8, src, k); @@ -13962,7 +13962,7 @@ pub unsafe fn _mm256_mask_cvt_roundps_ph( #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_cvt_roundps_ph(k: __mmask8, a: __m256) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let zero = _mm_setzero_si128().as_i16x8(); let r = vcvtps2ph256(a, IMM8, zero, k); @@ -13987,7 +13987,7 @@ pub unsafe fn _mm_mask_cvt_roundps_ph( k: __mmask8, a: __m128, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let src = src.as_i16x8(); let r = vcvtps2ph128(a, IMM8, src, k); @@ -14008,7 +14008,7 @@ pub unsafe fn _mm_mask_cvt_roundps_ph( #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_cvt_roundps_ph(k: __mmask8, a: __m128) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let zero = _mm_setzero_si128().as_i16x8(); let r = vcvtps2ph128(a, 
IMM8, zero, k); @@ -14085,7 +14085,7 @@ pub unsafe fn _mm256_mask_cvtps_ph( k: __mmask8, a: __m256, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let src = src.as_i16x8(); let r = vcvtps2ph256(a, IMM8, src, k); @@ -14106,7 +14106,7 @@ pub unsafe fn _mm256_mask_cvtps_ph( #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_cvtps_ph(k: __mmask8, a: __m256) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x8(); let zero = _mm_setzero_si128().as_i16x8(); let r = vcvtps2ph256(a, IMM8, zero, k); @@ -14127,7 +14127,7 @@ pub unsafe fn _mm256_maskz_cvtps_ph(k: __mmask8, a: __m256) -> #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_cvtps_ph(src: __m128i, k: __mmask8, a: __m128) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let src = src.as_i16x8(); let r = vcvtps2ph128(a, IMM8, src, k); @@ -14148,7 +14148,7 @@ pub unsafe fn _mm_mask_cvtps_ph(src: __m128i, k: __mmask8, a: _ #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_cvtps_ph(k: __mmask8, a: __m128) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let zero = _mm_setzero_si128().as_i16x8(); let r = vcvtps2ph128(a, IMM8, zero, k); @@ -16556,7 +16556,7 @@ pub unsafe fn _mm_maskz_expand_pd(k: __mmask8, a: __m128d) -> __m128d { #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_rol_epi32(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vprold(a, IMM8); transmute(r) @@ -16574,7 +16574,7 @@ pub unsafe fn _mm512_mask_rol_epi32( k: __mmask16, a: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + 
static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vprold(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x16())) @@ -16588,7 +16588,7 @@ pub unsafe fn _mm512_mask_rol_epi32( #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vprold(a, IMM8); let zero = _mm512_setzero_si512().as_i32x16(); @@ -16603,7 +16603,7 @@ pub unsafe fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_rol_epi32(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let r = vprold256(a, IMM8); transmute(r) @@ -16621,7 +16621,7 @@ pub unsafe fn _mm256_mask_rol_epi32( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let r = vprold256(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x8())) @@ -16635,7 +16635,7 @@ pub unsafe fn _mm256_mask_rol_epi32( #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let r = vprold256(a, IMM8); let zero = _mm256_setzero_si256().as_i32x8(); @@ -16650,7 +16650,7 @@ pub unsafe fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) - #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_rol_epi32(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let r = vprold128(a, IMM8); transmute(r) @@ -16668,7 +16668,7 @@ pub unsafe fn _mm_mask_rol_epi32( k: __mmask8, a: __m128i, ) -> __m128i { - 
static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let r = vprold128(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x4())) @@ -16682,7 +16682,7 @@ pub unsafe fn _mm_mask_rol_epi32( #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let r = vprold128(a, IMM8); let zero = _mm_setzero_si128().as_i32x4(); @@ -16697,7 +16697,7 @@ pub unsafe fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> _ #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_ror_epi32(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vprord(a, IMM8); transmute(r) @@ -16715,7 +16715,7 @@ pub unsafe fn _mm512_mask_ror_epi32( k: __mmask16, a: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vprord(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x16())) @@ -16729,7 +16729,7 @@ pub unsafe fn _mm512_mask_ror_epi32( #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vprord(a, IMM8); let zero = _mm512_setzero_si512().as_i32x16(); @@ -16744,7 +16744,7 @@ pub unsafe fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_ror_epi32(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let r = vprord256(a, IMM8); transmute(r) @@ -16762,7 +16762,7 @@ pub unsafe fn _mm256_mask_ror_epi32( k: __mmask8, a: __m256i, ) -> 
__m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let r = vprord256(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x8())) @@ -16776,7 +16776,7 @@ pub unsafe fn _mm256_mask_ror_epi32( #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let r = vprord256(a, IMM8); let zero = _mm256_setzero_si256().as_i32x8(); @@ -16791,7 +16791,7 @@ pub unsafe fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) - #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_ror_epi32(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let r = vprord128(a, IMM8); transmute(r) @@ -16809,7 +16809,7 @@ pub unsafe fn _mm_mask_ror_epi32( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let r = vprord128(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x4())) @@ -16823,7 +16823,7 @@ pub unsafe fn _mm_mask_ror_epi32( #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let r = vprord128(a, IMM8); let zero = _mm_setzero_si128().as_i32x4(); @@ -16838,7 +16838,7 @@ pub unsafe fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> _ #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_rol_epi64(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vprolq(a, IMM8); transmute(r) @@ -16856,7 +16856,7 @@ pub unsafe fn _mm512_mask_rol_epi64( k: __mmask8, a: __m512i, 
) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vprolq(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i64x8())) @@ -16870,7 +16870,7 @@ pub unsafe fn _mm512_mask_rol_epi64( #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vprolq(a, IMM8); let zero = _mm512_setzero_si512().as_i64x8(); @@ -16885,7 +16885,7 @@ pub unsafe fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) - #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_rol_epi64(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vprolq256(a, IMM8); transmute(r) @@ -16903,7 +16903,7 @@ pub unsafe fn _mm256_mask_rol_epi64( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vprolq256(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i64x4())) @@ -16917,7 +16917,7 @@ pub unsafe fn _mm256_mask_rol_epi64( #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vprolq256(a, IMM8); let zero = _mm256_setzero_si256().as_i64x4(); @@ -16932,7 +16932,7 @@ pub unsafe fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) - #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_rol_epi64(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vprolq128(a, IMM8); transmute(r) @@ -16950,7 +16950,7 @@ pub unsafe fn _mm_mask_rol_epi64( k: __mmask8, a: 
__m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vprolq128(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i64x2())) @@ -16964,7 +16964,7 @@ pub unsafe fn _mm_mask_rol_epi64( #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vprolq128(a, IMM8); let zero = _mm_setzero_si128().as_i64x2(); @@ -16979,7 +16979,7 @@ pub unsafe fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> _ #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_ror_epi64(a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vprorq(a, IMM8); transmute(r) @@ -16997,7 +16997,7 @@ pub unsafe fn _mm512_mask_ror_epi64( k: __mmask8, a: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vprorq(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i64x8())) @@ -17011,7 +17011,7 @@ pub unsafe fn _mm512_mask_ror_epi64( #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vprorq(a, IMM8); let zero = _mm512_setzero_si512().as_i64x8(); @@ -17026,7 +17026,7 @@ pub unsafe fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) - #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_ror_epi64(a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vprorq256(a, IMM8); transmute(r) @@ -17044,7 +17044,7 @@ pub unsafe fn _mm256_mask_ror_epi64( k: __mmask8, 
a: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vprorq256(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i64x4())) @@ -17058,7 +17058,7 @@ pub unsafe fn _mm256_mask_ror_epi64( #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vprorq256(a, IMM8); let zero = _mm256_setzero_si256().as_i64x4(); @@ -17073,7 +17073,7 @@ pub unsafe fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) - #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_ror_epi64(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vprorq128(a, IMM8); transmute(r) @@ -17091,7 +17091,7 @@ pub unsafe fn _mm_mask_ror_epi64( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vprorq128(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i64x2())) @@ -17105,7 +17105,7 @@ pub unsafe fn _mm_mask_ror_epi64( #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vprorq128(a, IMM8); let zero = _mm_setzero_si128().as_i64x2(); @@ -17120,7 +17120,7 @@ pub unsafe fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> _ #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_slli_epi32(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vpsllid(a, IMM8); transmute(r) @@ -17138,7 +17138,7 @@ pub unsafe fn _mm512_mask_slli_epi32( k: 
__mmask16, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let shf = vpsllid(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i32x16())) @@ -17152,7 +17152,7 @@ pub unsafe fn _mm512_mask_slli_epi32( #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let shf = vpsllid(a, IMM8); let zero = _mm512_setzero_si512().as_i32x16(); @@ -17171,7 +17171,7 @@ pub unsafe fn _mm256_mask_slli_epi32( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psllid256(a.as_i32x8(), imm8); transmute(simd_select_bitmask(k, r, src.as_i32x8())) @@ -17185,7 +17185,7 @@ pub unsafe fn _mm256_mask_slli_epi32( #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psllid256(a.as_i32x8(), imm8); let zero = _mm256_setzero_si256().as_i32x8(); @@ -17204,7 +17204,7 @@ pub unsafe fn _mm_mask_slli_epi32( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psllid128(a.as_i32x4(), imm8); transmute(simd_select_bitmask(k, r, src.as_i32x4())) @@ -17218,7 +17218,7 @@ pub unsafe fn _mm_mask_slli_epi32( #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psllid128(a.as_i32x4(), imm8); let zero = _mm_setzero_si128().as_i32x4(); @@ -17233,7 +17233,7 @@ pub unsafe fn 
_mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srli_epi32(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vpsrlid(a, IMM8); transmute(r) @@ -17251,7 +17251,7 @@ pub unsafe fn _mm512_mask_srli_epi32( k: __mmask16, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let shf = vpsrlid(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i32x16())) @@ -17265,7 +17265,7 @@ pub unsafe fn _mm512_mask_srli_epi32( #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let shf = vpsrlid(a, IMM8); let zero = _mm512_setzero_si512().as_i32x16(); @@ -17284,7 +17284,7 @@ pub unsafe fn _mm256_mask_srli_epi32( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrlid256(a.as_i32x8(), imm8); transmute(simd_select_bitmask(k, r, src.as_i32x8())) @@ -17298,7 +17298,7 @@ pub unsafe fn _mm256_mask_srli_epi32( #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrlid256(a.as_i32x8(), imm8); let zero = _mm256_setzero_si256().as_i32x8(); @@ -17317,7 +17317,7 @@ pub unsafe fn _mm_mask_srli_epi32( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrlid128(a.as_i32x4(), imm8); transmute(simd_select_bitmask(k, r, src.as_i32x4())) @@ -17331,7 +17331,7 @@ pub unsafe fn 
_mm_mask_srli_epi32( #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrlid128(a.as_i32x4(), imm8); let zero = _mm_setzero_si128().as_i32x4(); @@ -17346,7 +17346,7 @@ pub unsafe fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_slli_epi64(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vpslliq(a, IMM8); transmute(r) @@ -17364,7 +17364,7 @@ pub unsafe fn _mm512_mask_slli_epi64( k: __mmask8, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let shf = vpslliq(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i64x8())) @@ -17378,7 +17378,7 @@ pub unsafe fn _mm512_mask_slli_epi64( #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let shf = vpslliq(a, IMM8); let zero = _mm512_setzero_si512().as_i64x8(); @@ -17397,7 +17397,7 @@ pub unsafe fn _mm256_mask_slli_epi64( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliq256(a.as_i64x4(), imm8); transmute(simd_select_bitmask(k, r, src.as_i64x4())) @@ -17411,7 +17411,7 @@ pub unsafe fn _mm256_mask_slli_epi64( #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = 
pslliq256(a.as_i64x4(), imm8); let zero = _mm256_setzero_si256().as_i64x4(); @@ -17430,7 +17430,7 @@ pub unsafe fn _mm_mask_slli_epi64( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliq128(a.as_i64x2(), imm8); transmute(simd_select_bitmask(k, r, src.as_i64x2())) @@ -17444,7 +17444,7 @@ pub unsafe fn _mm_mask_slli_epi64( #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = pslliq128(a.as_i64x2(), imm8); let zero = _mm_setzero_si128().as_i64x2(); @@ -17459,7 +17459,7 @@ pub unsafe fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srli_epi64(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vpsrliq(a, IMM8); transmute(r) @@ -17477,7 +17477,7 @@ pub unsafe fn _mm512_mask_srli_epi64( k: __mmask8, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let shf = vpsrliq(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i64x8())) @@ -17491,7 +17491,7 @@ pub unsafe fn _mm512_mask_srli_epi64( #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let shf = vpsrliq(a, IMM8); let zero = _mm512_setzero_si512().as_i64x8(); @@ -17510,7 +17510,7 @@ pub unsafe fn _mm256_mask_srli_epi64( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrliq256(a.as_i64x4(), 
imm8); transmute(simd_select_bitmask(k, r, src.as_i64x4())) @@ -17524,7 +17524,7 @@ pub unsafe fn _mm256_mask_srli_epi64( #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrliq256(a.as_i64x4(), imm8); let zero = _mm256_setzero_si256().as_i64x4(); @@ -17543,7 +17543,7 @@ pub unsafe fn _mm_mask_srli_epi64( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrliq128(a.as_i64x2(), imm8); transmute(simd_select_bitmask(k, r, src.as_i64x2())) @@ -17557,7 +17557,7 @@ pub unsafe fn _mm_mask_srli_epi64( #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srli_epi64(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i32; let r = psrliq128(a.as_i64x2(), imm8); let zero = _mm_setzero_si128().as_i64x2(); @@ -18126,7 +18126,7 @@ pub unsafe fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __ #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srai_epi32(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vpsraid512(a, IMM8); transmute(r) @@ -18144,7 +18144,7 @@ pub unsafe fn _mm512_mask_srai_epi32( k: __mmask16, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vpsraid512(a, IMM8); transmute(simd_select_bitmask(k, r, src.as_i32x16())) @@ -18158,7 +18158,7 @@ pub unsafe fn _mm512_mask_srai_epi32( #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) 
-> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let r = vpsraid512(a, IMM8); let zero = _mm512_setzero_si512().as_i32x16(); @@ -18235,7 +18235,7 @@ pub unsafe fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srai_epi64(a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let r = vpsraiq(a, IMM8); transmute(r) @@ -18253,7 +18253,7 @@ pub unsafe fn _mm512_mask_srai_epi64( k: __mmask8, a: __m512i, ) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let shf = vpsraiq(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i64x8())) @@ -18267,7 +18267,7 @@ pub unsafe fn _mm512_mask_srai_epi64( #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m512i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x8(); let shf = vpsraiq(a, IMM8); let zero = _mm512_setzero_si512().as_i64x8(); @@ -18282,7 +18282,7 @@ pub unsafe fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_srai_epi64(a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let r = vpsraiq256(a, IMM8); transmute(r) @@ -18300,7 +18300,7 @@ pub unsafe fn _mm256_mask_srai_epi64( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let shf = vpsraiq256(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i64x4())) @@ -18314,7 +18314,7 @@ pub unsafe fn _mm256_mask_srai_epi64( #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn 
_mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x4(); let shf = vpsraiq256(a, IMM8); let zero = _mm256_setzero_si256().as_i64x4(); @@ -18329,7 +18329,7 @@ pub unsafe fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_srai_epi64(a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let r = vpsraiq128(a, IMM8); transmute(r) @@ -18347,7 +18347,7 @@ pub unsafe fn _mm_mask_srai_epi64( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let shf = vpsraiq128(a, IMM8); transmute(simd_select_bitmask(k, shf, src.as_i64x2())) @@ -18361,7 +18361,7 @@ pub unsafe fn _mm_mask_srai_epi64( #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> __m128i { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i64x2(); let shf = vpsraiq128(a, IMM8); let zero = _mm_setzero_si128().as_i64x2(); @@ -19366,7 +19366,7 @@ pub unsafe fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _ #[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permute_ps(a: __m512) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, a, @@ -19403,7 +19403,7 @@ pub unsafe fn _mm512_mask_permute_ps( k: __mmask16, a: __m512, ) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_ps::(a); transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16())) } @@ -19416,7 +19416,7 @@ pub unsafe fn _mm512_mask_permute_ps( #[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))] 
#[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_ps::(a); let zero = _mm512_setzero_ps().as_f32x16(); transmute(simd_select_bitmask(k, r.as_f32x16(), zero)) @@ -19484,7 +19484,7 @@ pub unsafe fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> _ #[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permute_pd(a: __m512d) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, a, @@ -19513,7 +19513,7 @@ pub unsafe fn _mm512_mask_permute_pd( k: __mmask8, a: __m512d, ) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8())) } @@ -19526,7 +19526,7 @@ pub unsafe fn _mm512_mask_permute_pd( #[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_pd::(a); let zero = _mm512_setzero_pd().as_f64x8(); transmute(simd_select_bitmask(k, r.as_f64x8(), zero)) @@ -19544,7 +19544,7 @@ pub unsafe fn _mm256_mask_permute_pd( k: __mmask8, a: __m256d, ) -> __m256d { - static_assert_imm4!(MASK); + static_assert_uimm_bits!(MASK, 4); let r = _mm256_permute_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4())) } @@ -19557,7 +19557,7 @@ pub unsafe fn _mm256_mask_permute_pd( #[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m256d { - static_assert_imm4!(MASK); + static_assert_uimm_bits!(MASK, 4); let r = _mm256_permute_pd::(a); let zero = _mm256_setzero_pd().as_f64x4(); 
transmute(simd_select_bitmask(k, r.as_f64x4(), zero)) @@ -19575,7 +19575,7 @@ pub unsafe fn _mm_mask_permute_pd( k: __mmask8, a: __m128d, ) -> __m128d { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); let r = _mm_permute_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x2(), src.as_f64x2())) } @@ -19588,7 +19588,7 @@ pub unsafe fn _mm_mask_permute_pd( #[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0b01))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); let r = _mm_permute_pd::(a); let zero = _mm_setzero_pd().as_f64x2(); transmute(simd_select_bitmask(k, r.as_f64x2(), zero)) @@ -19602,7 +19602,7 @@ pub unsafe fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permutex_epi64(a: __m512i) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, a, @@ -19631,7 +19631,7 @@ pub unsafe fn _mm512_mask_permutex_epi64( k: __mmask8, a: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_permutex_epi64::(a); transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8())) } @@ -19644,7 +19644,7 @@ pub unsafe fn _mm512_mask_permutex_epi64( #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_permutex_epi64::(a); let zero = _mm512_setzero_si512().as_i64x8(); transmute(simd_select_bitmask(k, r.as_i64x8(), zero)) @@ -19658,7 +19658,7 @@ pub unsafe fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m51 #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq 
#[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_permutex_epi64(a: __m256i) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, a, @@ -19683,7 +19683,7 @@ pub unsafe fn _mm256_mask_permutex_epi64( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_epi64::(a); transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4())) } @@ -19696,7 +19696,7 @@ pub unsafe fn _mm256_mask_permutex_epi64( #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_epi64::(a); let zero = _mm256_setzero_si256().as_i64x4(); transmute(simd_select_bitmask(k, r.as_i64x4(), zero)) @@ -19710,7 +19710,7 @@ pub unsafe fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m25 #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permutex_pd(a: __m512d) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, a, @@ -19764,7 +19764,7 @@ pub unsafe fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_permutex_pd(a: __m256d) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, a, @@ -19789,7 +19789,7 @@ pub unsafe fn _mm256_mask_permutex_pd( k: __mmask8, a: __m256d, ) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4())) } @@ -19802,7 +19802,7 @@ pub unsafe fn _mm256_mask_permutex_pd( #[cfg_attr(test, assert_instr(vperm, MASK = 
0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permutex_pd(k: __mmask8, a: __m256d) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_pd::(a); let zero = _mm256_setzero_pd().as_f64x4(); transmute(simd_select_bitmask(k, r.as_f64x4(), zero)) @@ -21018,7 +21018,7 @@ pub unsafe fn _mm_mask2_permutex2var_pd( #[cfg_attr(test, assert_instr(vpermilps, MASK = 9))] //should be vpshufd #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_shuffle_epi32(a: __m512i) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r: i32x16 = simd_shuffle!( a.as_i32x16(), a.as_i32x16(), @@ -21056,7 +21056,7 @@ pub unsafe fn _mm512_mask_shuffle_epi32( k: __mmask16, a: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_epi32::(a); transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16())) } @@ -21072,7 +21072,7 @@ pub unsafe fn _mm512_maskz_shuffle_epi32( k: __mmask16, a: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_epi32::(a); let zero = _mm512_setzero_si512().as_i32x16(); transmute(simd_select_bitmask(k, r.as_i32x16(), zero)) @@ -21090,7 +21090,7 @@ pub unsafe fn _mm256_mask_shuffle_epi32( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_epi32::(a); transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8())) } @@ -21106,7 +21106,7 @@ pub unsafe fn _mm256_maskz_shuffle_epi32( k: __mmask8, a: __m256i, ) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_epi32::(a); let zero = _mm256_setzero_si256().as_i32x8(); transmute(simd_select_bitmask(k, r.as_i32x8(), zero)) @@ -21124,7 +21124,7 @@ pub unsafe fn _mm_mask_shuffle_epi32( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(MASK); + 
static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_epi32::(a); transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4())) } @@ -21140,7 +21140,7 @@ pub unsafe fn _mm_maskz_shuffle_epi32( k: __mmask8, a: __m128i, ) -> __m128i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_epi32::(a); let zero = _mm_setzero_si128().as_i32x4(); transmute(simd_select_bitmask(k, r.as_i32x4(), zero)) @@ -21154,7 +21154,7 @@ pub unsafe fn _mm_maskz_shuffle_epi32( #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, b, @@ -21192,7 +21192,7 @@ pub unsafe fn _mm512_mask_shuffle_ps( a: __m512, b: __m512, ) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_ps::(a, b); transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16())) } @@ -21209,7 +21209,7 @@ pub unsafe fn _mm512_maskz_shuffle_ps( a: __m512, b: __m512, ) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_ps::(a, b); let zero = _mm512_setzero_ps().as_f32x16(); transmute(simd_select_bitmask(k, r.as_f32x16(), zero)) @@ -21228,7 +21228,7 @@ pub unsafe fn _mm256_mask_shuffle_ps( a: __m256, b: __m256, ) -> __m256 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_ps::(a, b); transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8())) } @@ -21245,7 +21245,7 @@ pub unsafe fn _mm256_maskz_shuffle_ps( a: __m256, b: __m256, ) -> __m256 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_ps::(a, b); let zero = _mm256_setzero_ps().as_f32x8(); transmute(simd_select_bitmask(k, r.as_f32x8(), zero)) @@ -21264,7 +21264,7 @@ pub unsafe fn _mm_mask_shuffle_ps( a: __m128, b: __m128, ) -> __m128 { - static_assert_imm8!(MASK); + 
static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_ps::(a, b); transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4())) } @@ -21277,7 +21277,7 @@ pub unsafe fn _mm_mask_shuffle_ps( #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_ps::(a, b); let zero = _mm_setzero_ps().as_f32x4(); transmute(simd_select_bitmask(k, r.as_f32x4(), zero)) @@ -21291,7 +21291,7 @@ pub unsafe fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: _ #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, b, @@ -21321,7 +21321,7 @@ pub unsafe fn _mm512_mask_shuffle_pd( a: __m512d, b: __m512d, ) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_pd::(a, b); transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8())) } @@ -21338,7 +21338,7 @@ pub unsafe fn _mm512_maskz_shuffle_pd( a: __m512d, b: __m512d, ) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_pd::(a, b); let zero = _mm512_setzero_pd().as_f64x8(); transmute(simd_select_bitmask(k, r.as_f64x8(), zero)) @@ -21357,7 +21357,7 @@ pub unsafe fn _mm256_mask_shuffle_pd( a: __m256d, b: __m256d, ) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_pd::(a, b); transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4())) } @@ -21374,7 +21374,7 @@ pub unsafe fn _mm256_maskz_shuffle_pd( a: __m256d, b: __m256d, ) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_pd::(a, b); let zero = _mm256_setzero_pd().as_f64x4(); transmute(simd_select_bitmask(k, 
r.as_f64x4(), zero)) @@ -21393,7 +21393,7 @@ pub unsafe fn _mm_mask_shuffle_pd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_pd::(a, b); transmute(simd_select_bitmask(k, r.as_f64x2(), src.as_f64x2())) } @@ -21410,7 +21410,7 @@ pub unsafe fn _mm_maskz_shuffle_pd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_pd::(a, b); let zero = _mm_setzero_pd().as_f64x2(); transmute(simd_select_bitmask(k, r.as_f64x2(), zero)) @@ -21424,7 +21424,7 @@ pub unsafe fn _mm_maskz_shuffle_pd( #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_01_01_01))] //should be vshufi32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_i32x16(); let b = b.as_i32x16(); let r: i32x16 = simd_shuffle!( @@ -21465,7 +21465,7 @@ pub unsafe fn _mm512_mask_shuffle_i32x4( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_i32x4::(a, b); transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16())) } @@ -21482,7 +21482,7 @@ pub unsafe fn _mm512_maskz_shuffle_i32x4( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_i32x4::(a, b); let zero = _mm512_setzero_si512().as_i32x16(); transmute(simd_select_bitmask(k, r.as_i32x16(), zero)) @@ -21496,7 +21496,7 @@ pub unsafe fn _mm512_maskz_shuffle_i32x4( #[cfg_attr(test, assert_instr(vperm, MASK = 0b11))] //should be vshufi32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_i32x8(); let b = b.as_i32x8(); let r: i32x8 = simd_shuffle!( @@ -21529,7 +21529,7 @@ pub unsafe fn 
_mm256_mask_shuffle_i32x4( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_i32x4::(a, b); transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8())) } @@ -21546,7 +21546,7 @@ pub unsafe fn _mm256_maskz_shuffle_i32x4( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_i32x4::(a, b); let zero = _mm256_setzero_si256().as_i32x8(); transmute(simd_select_bitmask(k, r.as_i32x8(), zero)) @@ -21560,7 +21560,7 @@ pub unsafe fn _mm256_maskz_shuffle_i32x4( #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_i64x8(); let b = b.as_i64x8(); let r: i64x8 = simd_shuffle!( @@ -21593,7 +21593,7 @@ pub unsafe fn _mm512_mask_shuffle_i64x2( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_i64x2::(a, b); transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8())) } @@ -21610,7 +21610,7 @@ pub unsafe fn _mm512_maskz_shuffle_i64x2( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_i64x2::(a, b); let zero = _mm512_setzero_si512().as_i64x8(); transmute(simd_select_bitmask(k, r.as_i64x8(), zero)) @@ -21624,7 +21624,7 @@ pub unsafe fn _mm512_maskz_shuffle_i64x2( #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshufi64x2 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_i64x4(); let b = b.as_i64x4(); let r: i64x4 = simd_shuffle!( @@ -21653,7 +21653,7 @@ pub unsafe fn _mm256_mask_shuffle_i64x2( a: __m256i, b: __m256i, ) -> __m256i { - 
static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_i64x2::(a, b); transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4())) } @@ -21670,7 +21670,7 @@ pub unsafe fn _mm256_maskz_shuffle_i64x2( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_i64x2::(a, b); let zero = _mm256_setzero_si256().as_i64x4(); transmute(simd_select_bitmask(k, r.as_i64x4(), zero)) @@ -21684,7 +21684,7 @@ pub unsafe fn _mm256_maskz_shuffle_i64x2( #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b1011))] //should be vshuff32x4, but generate vshuff64x2 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_f32x16(); let b = b.as_f32x16(); let r: f32x16 = simd_shuffle!( @@ -21725,7 +21725,7 @@ pub unsafe fn _mm512_mask_shuffle_f32x4( a: __m512, b: __m512, ) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_f32x4::(a, b); transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16())) } @@ -21742,7 +21742,7 @@ pub unsafe fn _mm512_maskz_shuffle_f32x4( a: __m512, b: __m512, ) -> __m512 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_f32x4::(a, b); let zero = _mm512_setzero_ps().as_f32x16(); transmute(simd_select_bitmask(k, r.as_f32x16(), zero)) @@ -21756,7 +21756,7 @@ pub unsafe fn _mm512_maskz_shuffle_f32x4( #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m256 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_f32x8(); let b = b.as_f32x8(); let r: f32x8 = simd_shuffle!( @@ -21789,7 +21789,7 @@ pub unsafe fn _mm256_mask_shuffle_f32x4( a: __m256, b: __m256, ) -> __m256 { - static_assert_imm8!(MASK); + 
static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_f32x4::(a, b); transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8())) } @@ -21806,7 +21806,7 @@ pub unsafe fn _mm256_maskz_shuffle_f32x4( a: __m256, b: __m256, ) -> __m256 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_f32x4::(a, b); let zero = _mm256_setzero_ps().as_f32x8(); transmute(simd_select_bitmask(k, r.as_f32x8(), zero)) @@ -21820,7 +21820,7 @@ pub unsafe fn _mm256_maskz_shuffle_f32x4( #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_f64x8(); let b = b.as_f64x8(); let r: f64x8 = simd_shuffle!( @@ -21853,7 +21853,7 @@ pub unsafe fn _mm512_mask_shuffle_f64x2( a: __m512d, b: __m512d, ) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_f64x2::(a, b); transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8())) } @@ -21870,7 +21870,7 @@ pub unsafe fn _mm512_maskz_shuffle_f64x2( a: __m512d, b: __m512d, ) -> __m512d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_f64x2::(a, b); let zero = _mm512_setzero_pd().as_f64x8(); transmute(simd_select_bitmask(k, r.as_f64x8(), zero)) @@ -21884,7 +21884,7 @@ pub unsafe fn _mm512_maskz_shuffle_f64x2( #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff64x2 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let a = a.as_f64x4(); let b = b.as_f64x4(); let r: f64x4 = simd_shuffle!( @@ -21913,7 +21913,7 @@ pub unsafe fn _mm256_mask_shuffle_f64x2( a: __m256d, b: __m256d, ) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_f64x2::(a, b); 
transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4())) } @@ -21930,7 +21930,7 @@ pub unsafe fn _mm256_maskz_shuffle_f64x2( a: __m256d, b: __m256d, ) -> __m256d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_f64x2::(a, b); let zero = _mm256_setzero_pd().as_f64x4(); transmute(simd_select_bitmask(k, r.as_f64x4(), zero)) @@ -21947,7 +21947,7 @@ pub unsafe fn _mm256_maskz_shuffle_f64x2( )] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_extractf32x4_ps(a: __m512) -> __m128 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); match IMM8 & 0x3 { 0 => simd_shuffle!(a, _mm512_undefined_ps(), [0, 1, 2, 3]), 1 => simd_shuffle!(a, _mm512_undefined_ps(), [4, 5, 6, 7]), @@ -21971,7 +21971,7 @@ pub unsafe fn _mm512_mask_extractf32x4_ps( k: __mmask8, a: __m512, ) -> __m128 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let r = _mm512_extractf32x4_ps::(a); transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4())) } @@ -21987,7 +21987,7 @@ pub unsafe fn _mm512_mask_extractf32x4_ps( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m512) -> __m128 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let r = _mm512_extractf32x4_ps::(a); let zero = _mm_setzero_ps().as_f32x4(); transmute(simd_select_bitmask(k, r.as_f32x4(), zero)) @@ -22004,7 +22004,7 @@ pub unsafe fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m5 )] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_extractf32x4_ps(a: __m256) -> __m128 { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); match IMM8 & 0x1 { 0 => simd_shuffle!(a, _mm256_undefined_ps(), [0, 1, 2, 3]), _ => simd_shuffle!(a, _mm256_undefined_ps(), [4, 5, 6, 7]), @@ -22026,7 +22026,7 @@ pub unsafe fn _mm256_mask_extractf32x4_ps( k: __mmask8, a: __m256, ) -> __m128 { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm256_extractf32x4_ps::(a); 
transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4())) } @@ -22042,7 +22042,7 @@ pub unsafe fn _mm256_mask_extractf32x4_ps( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m256) -> __m128 { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm256_extractf32x4_ps::(a); let zero = _mm_setzero_ps().as_f32x4(); transmute(simd_select_bitmask(k, r.as_f32x4(), zero)) @@ -22059,7 +22059,7 @@ pub unsafe fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m2 )] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_extracti64x4_epi64(a: __m512i) -> __m256i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); match IMM1 { 0 => simd_shuffle!(a, _mm512_set1_epi64(0), [0, 1, 2, 3]), _ => simd_shuffle!(a, _mm512_set1_epi64(0), [4, 5, 6, 7]), @@ -22081,7 +22081,7 @@ pub unsafe fn _mm512_mask_extracti64x4_epi64( k: __mmask8, a: __m512i, ) -> __m256i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let r = _mm512_extracti64x4_epi64::(a); transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4())) } @@ -22097,7 +22097,7 @@ pub unsafe fn _mm512_mask_extracti64x4_epi64( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: __m512i) -> __m256i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let r = _mm512_extracti64x4_epi64::(a); let zero = _mm256_setzero_si256().as_i64x4(); transmute(simd_select_bitmask(k, r.as_i64x4(), zero)) @@ -22114,7 +22114,7 @@ pub unsafe fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: _ )] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_extractf64x4_pd(a: __m512d) -> __m256d { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); match IMM8 & 0x1 { 0 => simd_shuffle!(a, _mm512_undefined_pd(), [0, 1, 2, 3]), _ => simd_shuffle!(a, _mm512_undefined_pd(), [4, 5, 6, 7]), @@ -22136,7 +22136,7 @@ pub unsafe fn _mm512_mask_extractf64x4_pd( k: __mmask8, a: __m512d, ) 
-> __m256d { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm512_extractf64x4_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4())) } @@ -22152,7 +22152,7 @@ pub unsafe fn _mm512_mask_extractf64x4_pd( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m512d) -> __m256d { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm512_extractf64x4_pd::(a); let zero = _mm256_setzero_pd().as_f64x4(); transmute(simd_select_bitmask(k, r.as_f64x4(), zero)) @@ -22169,7 +22169,7 @@ pub unsafe fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m5 )] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_extracti32x4_epi32(a: __m512i) -> __m128i { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); let a = a.as_i32x16(); let undefined = _mm512_undefined_epi32().as_i32x16(); let extract: i32x4 = match IMM2 { @@ -22196,7 +22196,7 @@ pub unsafe fn _mm512_mask_extracti32x4_epi32( k: __mmask8, a: __m512i, ) -> __m128i { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); let r = _mm512_extracti32x4_epi32::(a); transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4())) } @@ -22212,7 +22212,7 @@ pub unsafe fn _mm512_mask_extracti32x4_epi32( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: __m512i) -> __m128i { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); let r = _mm512_extracti32x4_epi32::(a); let zero = _mm_setzero_si128().as_i32x4(); transmute(simd_select_bitmask(k, r.as_i32x4(), zero)) @@ -22229,7 +22229,7 @@ pub unsafe fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: _ )] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_extracti32x4_epi32(a: __m256i) -> __m128i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let a = a.as_i32x8(); let undefined = _mm256_undefined_si256().as_i32x8(); let extract: i32x4 = match IMM1 { @@ -22254,7 +22254,7 @@ 
pub unsafe fn _mm256_mask_extracti32x4_epi32( k: __mmask8, a: __m256i, ) -> __m128i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let r = _mm256_extracti32x4_epi32::(a); transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4())) } @@ -22270,7 +22270,7 @@ pub unsafe fn _mm256_mask_extracti32x4_epi32( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_extracti32x4_epi32(k: __mmask8, a: __m256i) -> __m128i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); let r = _mm256_extracti32x4_epi32::(a); let zero = _mm_setzero_si128().as_i32x4(); transmute(simd_select_bitmask(k, r.as_i32x4(), zero)) @@ -22524,7 +22524,7 @@ pub unsafe fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d { #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] //should be vinserti32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m512i { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let a = a.as_i32x16(); let b = _mm512_castsi128_si512(b).as_i32x16(); let ret: i32x16 = match IMM8 & 0b11 { @@ -22561,7 +22561,7 @@ pub unsafe fn _mm512_mask_inserti32x4( a: __m512i, b: __m128i, ) -> __m512i { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let r = _mm512_inserti32x4::(a, b); transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16())) } @@ -22578,7 +22578,7 @@ pub unsafe fn _mm512_maskz_inserti32x4( a: __m512i, b: __m128i, ) -> __m512i { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let r = _mm512_inserti32x4::(a, b); let zero = _mm512_setzero_si512().as_i32x16(); transmute(simd_select_bitmask(k, r.as_i32x16(), zero)) @@ -22595,7 +22595,7 @@ pub unsafe fn _mm512_maskz_inserti32x4( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_inserti32x4(a: __m256i, b: __m128i) -> __m256i { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let a = a.as_i32x8(); let b = _mm256_castsi128_si256(b).as_i32x8(); let 
ret: i32x8 = match IMM8 & 0b1 { @@ -22621,7 +22621,7 @@ pub unsafe fn _mm256_mask_inserti32x4( a: __m256i, b: __m128i, ) -> __m256i { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm256_inserti32x4::(a, b); transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8())) } @@ -22641,7 +22641,7 @@ pub unsafe fn _mm256_maskz_inserti32x4( a: __m256i, b: __m128i, ) -> __m256i { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm256_inserti32x4::(a, b); let zero = _mm256_setzero_si256().as_i32x8(); transmute(simd_select_bitmask(k, r.as_i32x8(), zero)) @@ -22655,7 +22655,7 @@ pub unsafe fn _mm256_maskz_inserti32x4( #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] //should be vinserti64x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m512i { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let b = _mm512_castsi256_si512(b); match IMM8 & 0b1 { 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]), @@ -22676,7 +22676,7 @@ pub unsafe fn _mm512_mask_inserti64x4( a: __m512i, b: __m256i, ) -> __m512i { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm512_inserti64x4::(a, b); transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8())) } @@ -22693,7 +22693,7 @@ pub unsafe fn _mm512_maskz_inserti64x4( a: __m512i, b: __m256i, ) -> __m512i { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm512_inserti64x4::(a, b); let zero = _mm512_setzero_si512().as_i64x8(); transmute(simd_select_bitmask(k, r.as_i64x8(), zero)) @@ -22707,7 +22707,7 @@ pub unsafe fn _mm512_maskz_inserti64x4( #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m512 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let b = _mm512_castps128_ps512(b); match IMM8 & 0b11 { 0 => simd_shuffle!( @@ -22742,7 +22742,7 @@ pub 
unsafe fn _mm512_mask_insertf32x4( a: __m512, b: __m128, ) -> __m512 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let r = _mm512_insertf32x4::(a, b); transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16())) } @@ -22759,7 +22759,7 @@ pub unsafe fn _mm512_maskz_insertf32x4( a: __m512, b: __m128, ) -> __m512 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); let r = _mm512_insertf32x4::(a, b); let zero = _mm512_setzero_ps().as_f32x16(); transmute(simd_select_bitmask(k, r.as_f32x16(), zero)) @@ -22776,7 +22776,7 @@ pub unsafe fn _mm512_maskz_insertf32x4( )] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_insertf32x4(a: __m256, b: __m128) -> __m256 { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let b = _mm256_castps128_ps256(b); match IMM8 & 0b1 { 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]), @@ -22800,7 +22800,7 @@ pub unsafe fn _mm256_mask_insertf32x4( a: __m256, b: __m128, ) -> __m256 { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm256_insertf32x4::(a, b); transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8())) } @@ -22820,7 +22820,7 @@ pub unsafe fn _mm256_maskz_insertf32x4( a: __m256, b: __m128, ) -> __m256 { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm256_insertf32x4::(a, b); let zero = _mm256_setzero_ps().as_f32x8(); transmute(simd_select_bitmask(k, r.as_f32x8(), zero)) @@ -22834,7 +22834,7 @@ pub unsafe fn _mm256_maskz_insertf32x4( #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_insertf64x4(a: __m512d, b: __m256d) -> __m512d { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let b = _mm512_castpd256_pd512(b); match IMM8 & 0b1 { 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]), @@ -22855,7 +22855,7 @@ pub unsafe fn _mm512_mask_insertf64x4( a: __m512d, b: __m256d, ) -> __m512d { - static_assert_imm1!(IMM8); + 
static_assert_uimm_bits!(IMM8, 1); let r = _mm512_insertf64x4::(a, b); transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8())) } @@ -22872,7 +22872,7 @@ pub unsafe fn _mm512_maskz_insertf64x4( a: __m512d, b: __m256d, ) -> __m512d { - static_assert_imm1!(IMM8); + static_assert_uimm_bits!(IMM8, 1); let r = _mm512_insertf64x4::(a, b); let zero = _mm512_setzero_pd().as_f64x8(); transmute(simd_select_bitmask(k, r.as_f64x8(), zero)) @@ -24473,7 +24473,7 @@ pub unsafe fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); let b = b.as_i32x16(); let imm8: i32 = IMM8 % 16; @@ -24551,7 +24551,7 @@ pub unsafe fn _mm512_mask_alignr_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi32::(a, b); transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16())) } @@ -24568,7 +24568,7 @@ pub unsafe fn _mm512_maskz_alignr_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi32::(a, b); let zero = _mm512_setzero_si512().as_i32x16(); transmute(simd_select_bitmask(k, r.as_i32x16(), zero)) @@ -24582,7 +24582,7 @@ pub unsafe fn _mm512_maskz_alignr_epi32( #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); let b = b.as_i32x8(); let imm8: i32 = IMM8 % 16; @@ -24620,7 +24620,7 @@ pub unsafe fn _mm256_mask_alignr_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi32::(a, b); 
transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8())) } @@ -24637,7 +24637,7 @@ pub unsafe fn _mm256_maskz_alignr_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi32::(a, b); let zero = _mm256_setzero_si256().as_i32x8(); transmute(simd_select_bitmask(k, r.as_i32x8(), zero)) @@ -24651,7 +24651,7 @@ pub unsafe fn _mm256_maskz_alignr_epi32( #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_alignr_epi32(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let b = b.as_i32x4(); let imm8: i32 = IMM8 % 8; @@ -24681,7 +24681,7 @@ pub unsafe fn _mm_mask_alignr_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi32::(a, b); transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4())) } @@ -24698,7 +24698,7 @@ pub unsafe fn _mm_maskz_alignr_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi32::(a, b); let zero = _mm_setzero_si128().as_i32x4(); transmute(simd_select_bitmask(k, r.as_i32x4(), zero)) @@ -24712,7 +24712,7 @@ pub unsafe fn _mm_maskz_alignr_epi32( #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_alignr_epi64(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8: i32 = IMM8 % 8; let r: i64x8 = match imm8 { 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15]), @@ -24740,7 +24740,7 @@ pub unsafe fn _mm512_mask_alignr_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi64::(a, b); transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8())) } @@ -24757,7 
+24757,7 @@ pub unsafe fn _mm512_maskz_alignr_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi64::(a, b); let zero = _mm512_setzero_si512().as_i64x8(); transmute(simd_select_bitmask(k, r.as_i64x8(), zero)) @@ -24771,7 +24771,7 @@ pub unsafe fn _mm512_maskz_alignr_epi64( #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_alignr_epi64(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8: i32 = IMM8 % 8; let r: i64x4 = match imm8 { 0 => simd_shuffle!(a, b, [4, 5, 6, 7]), @@ -24799,7 +24799,7 @@ pub unsafe fn _mm256_mask_alignr_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi64::(a, b); transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4())) } @@ -24816,7 +24816,7 @@ pub unsafe fn _mm256_maskz_alignr_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi64::(a, b); let zero = _mm256_setzero_si256().as_i64x4(); transmute(simd_select_bitmask(k, r.as_i64x4(), zero)) @@ -24830,7 +24830,7 @@ pub unsafe fn _mm256_maskz_alignr_epi64( #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_alignr_epi64(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8: i32 = IMM8 % 4; let r: i64x2 = match imm8 { 0 => simd_shuffle!(a, b, [2, 3]), @@ -24854,7 +24854,7 @@ pub unsafe fn _mm_mask_alignr_epi64( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi64::(a, b); transmute(simd_select_bitmask(k, r.as_i64x2(), src.as_i64x2())) } @@ -24871,7 +24871,7 @@ pub unsafe fn _mm_maskz_alignr_epi64( a: __m128i, b: __m128i, 
) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi64::(a, b); let zero = _mm_setzero_si128().as_i64x2(); transmute(simd_select_bitmask(k, r.as_i64x2(), zero)) @@ -26579,7 +26579,7 @@ pub unsafe fn _mm512_mask_cmpneq_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_ps_mask(a: __m512, b: __m512) -> __mmask16 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let a = a.as_f32x16(); let b = b.as_f32x16(); @@ -26599,7 +26599,7 @@ pub unsafe fn _mm512_mask_cmp_ps_mask( a: __m512, b: __m512, ) -> __mmask16 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let a = a.as_f32x16(); let b = b.as_f32x16(); let r = vcmpps(a, b, IMM8, k1 as i16, _MM_FROUND_CUR_DIRECTION); @@ -26614,7 +26614,7 @@ pub unsafe fn _mm512_mask_cmp_ps_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_ps_mask(a: __m256, b: __m256) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let a = a.as_f32x8(); let b = b.as_f32x8(); @@ -26634,7 +26634,7 @@ pub unsafe fn _mm256_mask_cmp_ps_mask( a: __m256, b: __m256, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let a = a.as_f32x8(); let b = b.as_f32x8(); let r = vcmpps256(a, b, IMM8, k1 as i8); @@ -26649,7 +26649,7 @@ pub unsafe fn _mm256_mask_cmp_ps_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_ps_mask(a: __m128, b: __m128) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -26669,7 +26669,7 @@ pub unsafe fn _mm_mask_cmp_ps_mask( a: __m128, b: __m128, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let a = a.as_f32x4(); 
let b = b.as_f32x4(); let r = vcmpps128(a, b, IMM8, k1 as i8); @@ -26688,7 +26688,7 @@ pub unsafe fn _mm512_cmp_round_ps_mask( a: __m512, b: __m512, ) -> __mmask16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let neg_one = -1; let a = a.as_f32x16(); @@ -26710,7 +26710,7 @@ pub unsafe fn _mm512_mask_cmp_round_ps_mask( a: __m512, b: __m512, ) -> __mmask16 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let a = a.as_f32x16(); let b = b.as_f32x16(); @@ -26886,7 +26886,7 @@ pub unsafe fn _mm512_mask_cmpneq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) - #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let a = a.as_f64x8(); let b = b.as_f64x8(); @@ -26906,7 +26906,7 @@ pub unsafe fn _mm512_mask_cmp_pd_mask( a: __m512d, b: __m512d, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let a = a.as_f64x8(); let b = b.as_f64x8(); let r = vcmppd(a, b, IMM8, k1 as i8, _MM_FROUND_CUR_DIRECTION); @@ -26921,7 +26921,7 @@ pub unsafe fn _mm512_mask_cmp_pd_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_pd_mask(a: __m256d, b: __m256d) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let a = a.as_f64x4(); let b = b.as_f64x4(); @@ -26941,7 +26941,7 @@ pub unsafe fn _mm256_mask_cmp_pd_mask( a: __m256d, b: __m256d, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let a = a.as_f64x4(); let b = b.as_f64x4(); let r = vcmppd256(a, b, IMM8, k1 as i8); @@ -26956,7 +26956,7 @@ pub unsafe fn _mm256_mask_cmp_pd_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn 
_mm_cmp_pd_mask(a: __m128d, b: __m128d) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -26976,7 +26976,7 @@ pub unsafe fn _mm_mask_cmp_pd_mask( a: __m128d, b: __m128d, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let a = a.as_f64x2(); let b = b.as_f64x2(); let r = vcmppd128(a, b, IMM8, k1 as i8); @@ -26995,7 +26995,7 @@ pub unsafe fn _mm512_cmp_round_pd_mask( a: __m512d, b: __m512d, ) -> __mmask8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let neg_one = -1; let a = a.as_f64x8(); @@ -27017,7 +27017,7 @@ pub unsafe fn _mm512_mask_cmp_round_pd_mask( a: __m512d, b: __m512d, ) -> __mmask8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let a = a.as_f64x8(); let b = b.as_f64x8(); @@ -27073,7 +27073,7 @@ pub unsafe fn _mm512_mask_cmpunord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_ss_mask(a: __m128, b: __m128) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let r = vcmpss(a, b, IMM8, neg_one, _MM_FROUND_CUR_DIRECTION); transmute(r) @@ -27091,7 +27091,7 @@ pub unsafe fn _mm_mask_cmp_ss_mask( a: __m128, b: __m128, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let r = vcmpss(a, b, IMM8, k1 as i8, _MM_FROUND_CUR_DIRECTION); transmute(r) } @@ -27108,7 +27108,7 @@ pub unsafe fn _mm_cmp_round_ss_mask( a: __m128, b: __m128, ) -> __mmask8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let neg_one = -1; let r = vcmpss(a, b, IMM5, neg_one, SAE); @@ -27128,7 +27128,7 @@ pub unsafe fn _mm_mask_cmp_round_ss_mask( a: __m128, b: __m128, ) -> __mmask8 { - static_assert_imm5!(IMM5); + 
static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let r = vcmpss(a, b, IMM5, k1 as i8, SAE); transmute(r) @@ -27142,7 +27142,7 @@ pub unsafe fn _mm_mask_cmp_round_ss_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_sd_mask(a: __m128d, b: __m128d) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let neg_one = -1; let r = vcmpsd(a, b, IMM8, neg_one, _MM_FROUND_CUR_DIRECTION); transmute(r) @@ -27160,7 +27160,7 @@ pub unsafe fn _mm_mask_cmp_sd_mask( a: __m128d, b: __m128d, ) -> __mmask8 { - static_assert_imm5!(IMM8); + static_assert_uimm_bits!(IMM8, 5); let r = vcmpsd(a, b, IMM8, k1 as i8, _MM_FROUND_CUR_DIRECTION); transmute(r) } @@ -27177,7 +27177,7 @@ pub unsafe fn _mm_cmp_round_sd_mask( a: __m128d, b: __m128d, ) -> __mmask8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let neg_one = -1; let r = vcmpsd(a, b, IMM5, neg_one, SAE); @@ -27197,7 +27197,7 @@ pub unsafe fn _mm_mask_cmp_round_sd_mask( a: __m128d, b: __m128d, ) -> __mmask8 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let r = vcmpsd(a, b, IMM5, k1 as i8, SAE); transmute(r) @@ -27574,7 +27574,7 @@ pub unsafe fn _mm512_cmp_epu32_mask( a: __m512i, b: __m512i, ) -> __mmask16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i32x16(); let b = b.as_i32x16(); @@ -27594,7 +27594,7 @@ pub unsafe fn _mm512_mask_cmp_epu32_mask( a: __m512i, b: __m512i, ) -> __mmask16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x16(); let b = b.as_i32x16(); let r = vpcmpud(a, b, IMM3, k1 as i16); @@ -27612,7 +27612,7 @@ pub unsafe fn _mm256_cmp_epu32_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i32x8(); let b = b.as_i32x8(); @@ 
-27632,7 +27632,7 @@ pub unsafe fn _mm256_mask_cmp_epu32_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x8(); let b = b.as_i32x8(); let r = vpcmpud256(a, b, IMM3, k1 as i8); @@ -27647,7 +27647,7 @@ pub unsafe fn _mm256_mask_cmp_epu32_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i32x4(); let b = b.as_i32x4(); @@ -27667,7 +27667,7 @@ pub unsafe fn _mm_mask_cmp_epu32_mask( a: __m128i, b: __m128i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x4(); let b = b.as_i32x4(); let r = vpcmpud128(a, b, IMM3, k1 as i8); @@ -28045,7 +28045,7 @@ pub unsafe fn _mm512_cmp_epi32_mask( a: __m512i, b: __m512i, ) -> __mmask16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i32x16(); let b = b.as_i32x16(); @@ -28065,7 +28065,7 @@ pub unsafe fn _mm512_mask_cmp_epi32_mask( a: __m512i, b: __m512i, ) -> __mmask16 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x16(); let b = b.as_i32x16(); let r = vpcmpd(a, b, IMM3, k1 as i16); @@ -28083,7 +28083,7 @@ pub unsafe fn _mm256_cmp_epi32_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i32x8(); let b = b.as_i32x8(); @@ -28103,7 +28103,7 @@ pub unsafe fn _mm256_mask_cmp_epi32_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x8(); let b = b.as_i32x8(); let r = vpcmpd256(a, b, IMM3, k1 as i8); @@ -28118,7 +28118,7 @@ pub unsafe fn _mm256_mask_cmp_epi32_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn 
_mm_cmp_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i32x4(); let b = b.as_i32x4(); @@ -28138,7 +28138,7 @@ pub unsafe fn _mm_mask_cmp_epi32_mask( a: __m128i, b: __m128i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x4(); let b = b.as_i32x4(); let r = vpcmpd128(a, b, IMM3, k1 as i8); @@ -28516,7 +28516,7 @@ pub unsafe fn _mm512_cmp_epu64_mask( a: __m512i, b: __m512i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i64x8(); let b = b.as_i64x8(); @@ -28536,7 +28536,7 @@ pub unsafe fn _mm512_mask_cmp_epu64_mask( a: __m512i, b: __m512i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x8(); let b = b.as_i64x8(); let r = vpcmpuq(a, b, IMM3, k1 as i8); @@ -28554,7 +28554,7 @@ pub unsafe fn _mm256_cmp_epu64_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i64x4(); let b = b.as_i64x4(); @@ -28574,7 +28574,7 @@ pub unsafe fn _mm256_mask_cmp_epu64_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x4(); let b = b.as_i64x4(); let r = vpcmpuq256(a, b, IMM3, k1 as i8); @@ -28589,7 +28589,7 @@ pub unsafe fn _mm256_mask_cmp_epu64_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i64x2(); let b = b.as_i64x2(); @@ -28609,7 +28609,7 @@ pub unsafe fn _mm_mask_cmp_epu64_mask( a: __m128i, b: __m128i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x2(); let b = b.as_i64x2(); let r = vpcmpuq128(a, b, 
IMM3, k1 as i8); @@ -28987,7 +28987,7 @@ pub unsafe fn _mm512_cmp_epi64_mask( a: __m512i, b: __m512i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i64x8(); let b = b.as_i64x8(); @@ -29007,7 +29007,7 @@ pub unsafe fn _mm512_mask_cmp_epi64_mask( a: __m512i, b: __m512i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x8(); let b = b.as_i64x8(); let r = vpcmpq(a, b, IMM3, k1 as i8); @@ -29025,7 +29025,7 @@ pub unsafe fn _mm256_cmp_epi64_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i64x4(); let b = b.as_i64x4(); @@ -29045,7 +29045,7 @@ pub unsafe fn _mm256_mask_cmp_epi64_mask( a: __m256i, b: __m256i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x4(); let b = b.as_i64x4(); let r = vpcmpq256(a, b, IMM3, k1 as i8); @@ -29060,7 +29060,7 @@ pub unsafe fn _mm256_mask_cmp_epi64_mask( #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let neg_one = -1; let a = a.as_i64x2(); let b = b.as_i64x2(); @@ -29080,7 +29080,7 @@ pub unsafe fn _mm_mask_cmp_epi64_mask( a: __m128i, b: __m128i, ) -> __mmask8 { - static_assert_imm3!(IMM3); + static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x2(); let b = b.as_i64x2(); let r = vpcmpq128(a, b, IMM3, k1 as i8); @@ -33066,8 +33066,8 @@ pub unsafe fn _mm_getmant_ss< a: __m128, b: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x4(); let b = b.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); @@ -33101,8 +33101,8 @@ pub unsafe fn _mm_mask_getmant_ss< a: __m128, b: __m128, ) -> __m128 { - 
static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x4(); let b = b.as_f32x4(); let src = src.as_f32x4(); @@ -33135,8 +33135,8 @@ pub unsafe fn _mm_maskz_getmant_ss< a: __m128, b: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f32x4(); let b = b.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); @@ -33168,8 +33168,8 @@ pub unsafe fn _mm_getmant_sd< a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x2(); let b = b.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); @@ -33203,8 +33203,8 @@ pub unsafe fn _mm_mask_getmant_sd< a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x2(); let b = b.as_f64x2(); let src = src.as_f64x2(); @@ -33237,8 +33237,8 @@ pub unsafe fn _mm_maskz_getmant_sd< a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); let a = a.as_f64x2(); let b = b.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); @@ -33260,7 +33260,7 @@ pub unsafe fn _mm_maskz_getmant_sd< #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 255))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_roundscale_ss(a: __m128, b: __m128) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); @@ -33287,7 +33287,7 @@ pub unsafe fn _mm_mask_roundscale_ss( a: __m128, b: __m128, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = 
b.as_f32x4(); let src = src.as_f32x4(); @@ -33313,7 +33313,7 @@ pub unsafe fn _mm_maskz_roundscale_ss( a: __m128, b: __m128, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let zero = _mm_setzero_ps().as_f32x4(); @@ -33335,7 +33335,7 @@ pub unsafe fn _mm_maskz_roundscale_ss( #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 255))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_roundscale_sd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); @@ -33362,7 +33362,7 @@ pub unsafe fn _mm_mask_roundscale_sd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let src = src.as_f64x2(); @@ -33388,7 +33388,7 @@ pub unsafe fn _mm_maskz_roundscale_sd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let zero = _mm_setzero_pd().as_f64x2(); @@ -35083,8 +35083,8 @@ pub unsafe fn _mm_getmant_round_ss< a: __m128, b: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -35120,8 +35120,8 @@ pub unsafe fn _mm_mask_getmant_round_ss< a: __m128, b: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -35156,8 +35156,8 @@ pub unsafe fn _mm_maskz_getmant_round_ss< a: __m128, b: __m128, ) -> __m128 { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 
2); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -35191,8 +35191,8 @@ pub unsafe fn _mm_getmant_round_sd< a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -35228,8 +35228,8 @@ pub unsafe fn _mm_mask_getmant_round_sd< a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -35264,8 +35264,8 @@ pub unsafe fn _mm_maskz_getmant_round_sd< a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm4!(NORM); - static_assert_imm2!(SIGN); + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -35292,7 +35292,7 @@ pub unsafe fn _mm_roundscale_round_ss( a: __m128, b: __m128, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -35321,7 +35321,7 @@ pub unsafe fn _mm_mask_roundscale_round_ss( a: __m128, b: __m128, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -35349,7 +35349,7 @@ pub unsafe fn _mm_maskz_roundscale_round_ss( a: __m128, b: __m128, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -35376,7 +35376,7 @@ pub unsafe fn _mm_roundscale_round_sd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = 
b.as_f64x2(); @@ -35405,7 +35405,7 @@ pub unsafe fn _mm_mask_roundscale_round_sd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -35433,7 +35433,7 @@ pub unsafe fn _mm_maskz_roundscale_round_sd( a: __m128d, b: __m128d, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -36597,7 +36597,7 @@ pub unsafe fn _mm_mask3_fnmsub_round_sd( #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_ss(a: __m128, b: __m128, c: __m128i) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let c = c.as_i32x4(); @@ -36620,7 +36620,7 @@ pub unsafe fn _mm_mask_fixupimm_ss( b: __m128, c: __m128i, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let c = c.as_i32x4(); @@ -36643,7 +36643,7 @@ pub unsafe fn _mm_maskz_fixupimm_ss( b: __m128, c: __m128i, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f32x4(); let b = b.as_f32x4(); let c = c.as_i32x4(); @@ -36661,7 +36661,7 @@ pub unsafe fn _mm_maskz_fixupimm_ss( #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_sd(a: __m128d, b: __m128d, c: __m128i) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let c = c.as_i64x2(); @@ -36684,7 +36684,7 @@ pub unsafe fn _mm_mask_fixupimm_sd( b: __m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let c = c.as_i64x2(); @@ -36707,7 +36707,7 @@ pub unsafe fn _mm_maskz_fixupimm_sd( b: 
__m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_f64x2(); let b = b.as_f64x2(); let c = c.as_i64x2(); @@ -36730,7 +36730,7 @@ pub unsafe fn _mm_fixupimm_round_ss( b: __m128, c: __m128i, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -36755,7 +36755,7 @@ pub unsafe fn _mm_mask_fixupimm_round_ss( b: __m128, c: __m128i, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -36780,7 +36780,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_ss( b: __m128, c: __m128i, ) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -36804,7 +36804,7 @@ pub unsafe fn _mm_fixupimm_round_sd( b: __m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -36829,7 +36829,7 @@ pub unsafe fn _mm_mask_fixupimm_round_sd( b: __m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -36854,7 +36854,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_sd( b: __m128d, c: __m128i, ) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -37473,7 +37473,7 @@ pub unsafe fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d { #[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomiss #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_comi_round_ss(a: __m128, b: __m128) -> i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); 
static_assert_mantissas_sae!(SAE); let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -37490,7 +37490,7 @@ pub unsafe fn _mm_comi_round_ss(a: __m128, b: _ #[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomisd #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_comi_round_sd(a: __m128d, b: __m128d) -> i32 { - static_assert_imm5!(IMM5); + static_assert_uimm_bits!(IMM5, 5); static_assert_mantissas_sae!(SAE); let a = a.as_f64x2(); let b = b.as_f64x2(); diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs b/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs index f6543bfbd07c..404443e9e6dc 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs @@ -1214,7 +1214,7 @@ pub unsafe fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m1 #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shldi_epi64(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; transmute(vpshldvq( a.as_i64x8(), @@ -1236,7 +1236,7 @@ pub unsafe fn _mm512_mask_shldi_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x8 = vpshldvq( a.as_i64x8(), @@ -1258,7 +1258,7 @@ pub unsafe fn _mm512_maskz_shldi_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x8 = vpshldvq( a.as_i64x8(), @@ -1277,7 +1277,7 @@ pub unsafe fn _mm512_maskz_shldi_epi64( #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shldi_epi64(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; transmute(vpshldvq256( a.as_i64x4(), @@ -1299,7 +1299,7 @@ pub unsafe fn 
_mm256_mask_shldi_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x4 = vpshldvq256( a.as_i64x4(), @@ -1321,7 +1321,7 @@ pub unsafe fn _mm256_maskz_shldi_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x4 = vpshldvq256( a.as_i64x4(), @@ -1340,7 +1340,7 @@ pub unsafe fn _mm256_maskz_shldi_epi64( #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shldi_epi64(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; transmute(vpshldvq128( a.as_i64x2(), @@ -1362,7 +1362,7 @@ pub unsafe fn _mm_mask_shldi_epi64( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x2 = vpshldvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2()); transmute(simd_select_bitmask(k, shf, src.as_i64x2())) @@ -1380,7 +1380,7 @@ pub unsafe fn _mm_maskz_shldi_epi64( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x2 = vpshldvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2()); let zero = _mm_setzero_si128().as_i64x2(); @@ -1395,7 +1395,7 @@ pub unsafe fn _mm_maskz_shldi_epi64( #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shldi_epi32(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vpshldvd( a.as_i32x16(), b.as_i32x16(), @@ -1416,7 +1416,7 @@ pub unsafe fn _mm512_mask_shldi_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x16 = vpshldvd( a.as_i32x16(), b.as_i32x16(), @@ -1437,7 
+1437,7 @@ pub unsafe fn _mm512_maskz_shldi_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x16 = vpshldvd( a.as_i32x16(), b.as_i32x16(), @@ -1455,7 +1455,7 @@ pub unsafe fn _mm512_maskz_shldi_epi32( #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vpshldvd256( a.as_i32x8(), b.as_i32x8(), @@ -1476,7 +1476,7 @@ pub unsafe fn _mm256_mask_shldi_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x8 = vpshldvd256( a.as_i32x8(), b.as_i32x8(), @@ -1497,7 +1497,7 @@ pub unsafe fn _mm256_maskz_shldi_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x8 = vpshldvd256( a.as_i32x8(), b.as_i32x8(), @@ -1515,7 +1515,7 @@ pub unsafe fn _mm256_maskz_shldi_epi32( #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vpshldvd128( a.as_i32x4(), b.as_i32x4(), @@ -1536,7 +1536,7 @@ pub unsafe fn _mm_mask_shldi_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x4 = vpshldvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4()); transmute(simd_select_bitmask(k, shf, src.as_i32x4())) } @@ -1553,7 +1553,7 @@ pub unsafe fn _mm_maskz_shldi_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x4 = vpshldvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4()); let zero = _mm_setzero_si128().as_i32x4(); transmute(simd_select_bitmask(k, shf, zero)) @@ -1567,7 
+1567,7 @@ pub unsafe fn _mm_maskz_shldi_epi32( #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; transmute(vpshldvw( a.as_i16x32(), @@ -1589,7 +1589,7 @@ pub unsafe fn _mm512_mask_shldi_epi16( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x32 = vpshldvw( a.as_i16x32(), @@ -1611,7 +1611,7 @@ pub unsafe fn _mm512_maskz_shldi_epi16( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x32 = vpshldvw( a.as_i16x32(), @@ -1630,7 +1630,7 @@ pub unsafe fn _mm512_maskz_shldi_epi16( #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; transmute(vpshldvw256( a.as_i16x16(), @@ -1652,7 +1652,7 @@ pub unsafe fn _mm256_mask_shldi_epi16( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x16 = vpshldvw256( a.as_i16x16(), @@ -1674,7 +1674,7 @@ pub unsafe fn _mm256_maskz_shldi_epi16( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x16 = vpshldvw256( a.as_i16x16(), @@ -1693,7 +1693,7 @@ pub unsafe fn _mm256_maskz_shldi_epi16( #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; transmute(vpshldvw128( a.as_i16x8(), @@ -1715,7 +1715,7 @@ pub unsafe fn 
_mm_mask_shldi_epi16( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x8 = vpshldvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8()); transmute(simd_select_bitmask(k, shf, src.as_i16x8())) @@ -1733,7 +1733,7 @@ pub unsafe fn _mm_maskz_shldi_epi16( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x8 = vpshldvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8()); let zero = _mm_setzero_si128().as_i16x8(); @@ -1748,7 +1748,7 @@ pub unsafe fn _mm_maskz_shldi_epi16( #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; transmute(vpshrdvq( a.as_i64x8(), @@ -1770,7 +1770,7 @@ pub unsafe fn _mm512_mask_shrdi_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x8 = vpshrdvq( a.as_i64x8(), @@ -1792,7 +1792,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi64( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x8 = vpshrdvq( a.as_i64x8(), @@ -1811,7 +1811,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi64( #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; transmute(vpshrdvq256( a.as_i64x4(), @@ -1833,7 +1833,7 @@ pub unsafe fn _mm256_mask_shrdi_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let 
shf: i64x4 = vpshrdvq256( a.as_i64x4(), @@ -1855,7 +1855,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi64( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x4 = vpshrdvq256( a.as_i64x4(), @@ -1874,7 +1874,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi64( #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; transmute(vpshrdvq128( a.as_i64x2(), @@ -1896,7 +1896,7 @@ pub unsafe fn _mm_mask_shrdi_epi64( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x2 = vpshrdvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2()); transmute(simd_select_bitmask(k, shf, src.as_i64x2())) @@ -1914,7 +1914,7 @@ pub unsafe fn _mm_maskz_shrdi_epi64( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i64; let shf: i64x2 = vpshrdvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2()); let zero = _mm_setzero_si128().as_i64x2(); @@ -1929,7 +1929,7 @@ pub unsafe fn _mm_maskz_shrdi_epi64( #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vpshrdvd( a.as_i32x16(), b.as_i32x16(), @@ -1950,7 +1950,7 @@ pub unsafe fn _mm512_mask_shrdi_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x16 = vpshrdvd( a.as_i32x16(), b.as_i32x16(), @@ -1971,7 +1971,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi32( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + 
static_assert_uimm_bits!(IMM8, 8); let shf: i32x16 = vpshrdvd( a.as_i32x16(), b.as_i32x16(), @@ -1989,7 +1989,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi32( #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vpshrdvd256( a.as_i32x8(), b.as_i32x8(), @@ -2010,7 +2010,7 @@ pub unsafe fn _mm256_mask_shrdi_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x8 = vpshrdvd256( a.as_i32x8(), b.as_i32x8(), @@ -2031,7 +2031,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi32( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x8 = vpshrdvd256( a.as_i32x8(), b.as_i32x8(), @@ -2049,7 +2049,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi32( #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(vpshrdvd128( a.as_i32x4(), b.as_i32x4(), @@ -2070,7 +2070,7 @@ pub unsafe fn _mm_mask_shrdi_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x4 = vpshrdvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4()); transmute(simd_select_bitmask(k, shf, src.as_i32x4())) } @@ -2087,7 +2087,7 @@ pub unsafe fn _mm_maskz_shrdi_epi32( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let shf: i32x4 = vpshrdvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4()); let zero = _mm_setzero_si128().as_i32x4(); transmute(simd_select_bitmask(k, shf, zero)) @@ -2101,7 +2101,7 @@ pub unsafe fn _mm_maskz_shrdi_epi32( #[cfg_attr(test, assert_instr(vpshldw, 
IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; assert!(imm8 >= 0 && imm8 <= 255); transmute(vpshrdvw( @@ -2124,7 +2124,7 @@ pub unsafe fn _mm512_mask_shrdi_epi16( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; assert!(imm8 >= 0 && imm8 <= 255); let shf: i16x32 = vpshrdvw( @@ -2147,7 +2147,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi16( a: __m512i, b: __m512i, ) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; assert!(imm8 >= 0 && imm8 <= 255); let shf: i16x32 = vpshrdvw( @@ -2167,7 +2167,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi16( #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; assert!(imm8 >= 0 && imm8 <= 255); transmute(vpshrdvw256( @@ -2190,7 +2190,7 @@ pub unsafe fn _mm256_mask_shrdi_epi16( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; assert!(imm8 >= 0 && imm8 <= 255); let shf: i16x16 = vpshrdvw256( @@ -2213,7 +2213,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi16( a: __m256i, b: __m256i, ) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x16 = vpshrdvw256( a.as_i16x16(), @@ -2232,7 +2232,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi16( #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; 
transmute(vpshrdvw128( a.as_i16x8(), @@ -2254,7 +2254,7 @@ pub unsafe fn _mm_mask_shrdi_epi16( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x8 = vpshrdvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8()); transmute(simd_select_bitmask(k, shf, src.as_i16x8())) @@ -2272,7 +2272,7 @@ pub unsafe fn _mm_maskz_shrdi_epi16( a: __m128i, b: __m128i, ) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let imm8 = IMM8 as i16; let shf: i16x8 = vpshrdvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8()); let zero = _mm_setzero_si128().as_i16x8(); diff --git a/library/stdarch/crates/core_arch/src/x86/f16c.rs b/library/stdarch/crates/core_arch/src/x86/f16c.rs index 1c60f8ebc249..a27a1123afae 100644 --- a/library/stdarch/crates/core_arch/src/x86/f16c.rs +++ b/library/stdarch/crates/core_arch/src/x86/f16c.rs @@ -58,7 +58,7 @@ pub unsafe fn _mm256_cvtph_ps(a: __m128i) -> __m256 { #[cfg_attr(test, assert_instr("vcvtps2ph", IMM_ROUNDING = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtps_ph(a: __m128) -> __m128i { - static_assert_imm3!(IMM_ROUNDING); + static_assert_uimm_bits!(IMM_ROUNDING, 3); let a = a.as_f32x4(); let r = llvm_vcvtps2ph_128(a, IMM_ROUNDING); transmute(r) @@ -79,7 +79,7 @@ pub unsafe fn _mm_cvtps_ph(a: __m128) -> __m128i { #[cfg_attr(test, assert_instr("vcvtps2ph", IMM_ROUNDING = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_cvtps_ph(a: __m256) -> __m128i { - static_assert_imm3!(IMM_ROUNDING); + static_assert_uimm_bits!(IMM_ROUNDING, 3); let a = a.as_f32x8(); let r = llvm_vcvtps2ph_256(a, IMM_ROUNDING); transmute(r) diff --git a/library/stdarch/crates/core_arch/src/x86/gfni.rs b/library/stdarch/crates/core_arch/src/x86/gfni.rs index 2d03960f2da3..7c2195e71374 100644 --- a/library/stdarch/crates/core_arch/src/x86/gfni.rs +++ b/library/stdarch/crates/core_arch/src/x86/gfni.rs @@ -238,7 +238,7 
@@ pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _ #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_gf2p8affine_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x64(); let a = a.as_i8x64(); @@ -264,7 +264,7 @@ pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8( x: __m512i, a: __m512i, ) -> __m512i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let zero = _mm512_setzero_si512().as_i8x64(); let x = x.as_i8x64(); @@ -292,7 +292,7 @@ pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8( x: __m512i, a: __m512i, ) -> __m512i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x64(); let a = a.as_i8x64(); @@ -311,7 +311,7 @@ pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8( #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_gf2p8affine_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x32(); let a = a.as_i8x32(); @@ -337,7 +337,7 @@ pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8( x: __m256i, a: __m256i, ) -> __m256i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let zero = _mm256_setzero_si256().as_i8x32(); let x = x.as_i8x32(); @@ -365,7 +365,7 @@ pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8( x: __m256i, a: __m256i, ) -> __m256i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x32(); let a = a.as_i8x32(); @@ -384,7 +384,7 @@ pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8( #[cfg_attr(test, assert_instr(gf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_gf2p8affine_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 
8); let b = B as u8; let x = x.as_i8x16(); let a = a.as_i8x16(); @@ -410,7 +410,7 @@ pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8( x: __m128i, a: __m128i, ) -> __m128i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let zero = _mm_setzero_si128().as_i8x16(); let x = x.as_i8x16(); @@ -438,7 +438,7 @@ pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8( x: __m128i, a: __m128i, ) -> __m128i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x16(); let a = a.as_i8x16(); @@ -459,7 +459,7 @@ pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8( #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x64(); let a = a.as_i8x64(); @@ -487,7 +487,7 @@ pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8( x: __m512i, a: __m512i, ) -> __m512i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let zero = _mm512_setzero_si512().as_i8x64(); let x = x.as_i8x64(); @@ -517,7 +517,7 @@ pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8( x: __m512i, a: __m512i, ) -> __m512i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x64(); let a = a.as_i8x64(); @@ -538,7 +538,7 @@ pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8( #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x32(); let a = a.as_i8x32(); @@ -566,7 +566,7 @@ pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8( x: __m256i, a: __m256i, ) -> __m256i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let zero = 
_mm256_setzero_si256().as_i8x32(); let x = x.as_i8x32(); @@ -596,7 +596,7 @@ pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8( x: __m256i, a: __m256i, ) -> __m256i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x32(); let a = a.as_i8x32(); @@ -617,7 +617,7 @@ pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8( #[cfg_attr(test, assert_instr(gf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_gf2p8affineinv_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x16(); let a = a.as_i8x16(); @@ -645,7 +645,7 @@ pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8( x: __m128i, a: __m128i, ) -> __m128i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let zero = _mm_setzero_si128().as_i8x16(); let x = x.as_i8x16(); @@ -675,7 +675,7 @@ pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8( x: __m128i, a: __m128i, ) -> __m128i { - static_assert_imm8!(B); + static_assert_uimm_bits!(B, 8); let b = B as u8; let x = x.as_i8x16(); let a = a.as_i8x16(); diff --git a/library/stdarch/crates/core_arch/src/x86/macros.rs b/library/stdarch/crates/core_arch/src/x86/macros.rs index e686e65b3038..17d64f5bbfd1 100644 --- a/library/stdarch/crates/core_arch/src/x86/macros.rs +++ b/library/stdarch/crates/core_arch/src/x86/macros.rs @@ -1,89 +1,44 @@ //! Utility macros. -//! -// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is -// not a round number. -pub(crate) struct ValidateConstRound; -impl ValidateConstRound { - pub(crate) const VALID: () = { - assert!( - IMM == 4 || IMM == 8 || IMM == 9 || IMM == 10 || IMM == 11, - "Invalid IMM value" - ); - }; -} +// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is +// not a round number. #[allow(unused)] macro_rules! 
static_assert_rounding { ($imm:ident) => { - let _ = $crate::core_arch::x86::macros::ValidateConstRound::<$imm>::VALID; + static_assert!( + $imm == 4 || $imm == 8 || $imm == 9 || $imm == 10 || $imm == 11, + "Invalid IMM value" + ) }; } -// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is +// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is // not a sae number. -pub(crate) struct ValidateConstSae; -impl ValidateConstSae { - pub(crate) const VALID: () = { - assert!(IMM == 4 || IMM == 8, "Invalid IMM value"); - }; -} - #[allow(unused)] macro_rules! static_assert_sae { ($imm:ident) => { - let _ = $crate::core_arch::x86::macros::ValidateConstSae::<$imm>::VALID; + static_assert!($imm == 4 || $imm == 8, "Invalid IMM value") }; } -// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is +// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is // not a mantissas sae number. -pub(crate) struct ValidateConstMantissasSae; -impl ValidateConstMantissasSae { - pub(crate) const VALID: () = { - assert!(IMM == 4 || IMM == 8 || IMM == 12, "Invalid IMM value"); - }; -} - #[allow(unused)] macro_rules! static_assert_mantissas_sae { ($imm:ident) => { - let _ = $crate::core_arch::x86::macros::ValidateConstMantissasSae::<$imm>::VALID; + static_assert!($imm == 4 || $imm == 8 || $imm == 12, "Invalid IMM value") }; } -// Helper struct used to trigger const eval errors when the unsigned const generic immediate value -// `IMM` is out of `[MIN-MAX]` range. -pub(crate) struct ValidateConstImmU32; -impl ValidateConstImmU32 { - pub(crate) const VALID: () = { - assert!(IMM >= MIN && IMM <= MAX, "IMM value not in expected range"); - }; -} - -#[allow(unused_macros)] -macro_rules! 
static_assert_imm_u8 { - ($imm:ident) => { - let _ = - $crate::core_arch::x86::macros::ValidateConstImmU32::<$imm, 0, { (1 << 8) - 1 }>::VALID; - }; -} - -// Helper struct used to trigger const eval errors when the const generic immediate value `SCALE` is +// Helper macro used to trigger const eval errors when the const generic immediate value `SCALE` is // not valid for gather instructions: the only valid scale values are 1, 2, 4 and 8. -pub(crate) struct ValidateConstGatherScale; -impl ValidateConstGatherScale { - pub(crate) const VALID: () = { - assert!( - SCALE == 1 || SCALE == 2 || SCALE == 4 || SCALE == 8, - "Invalid SCALE value" - ); - }; -} - #[allow(unused)] macro_rules! static_assert_imm8_scale { ($imm:ident) => { - let _ = $crate::core_arch::x86::macros::ValidateConstGatherScale::<$imm>::VALID; + static_assert!( + $imm == 1 || $imm == 2 || $imm == 4 || $imm == 8, + "Invalid SCALE value" + ) }; } diff --git a/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs index 3091078fbd8a..6a5cd73f9392 100644 --- a/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs +++ b/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs @@ -33,7 +33,7 @@ extern "C" { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_clmulepi64_si128(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pclmulqdq(a, b, IMM8 as u8) } diff --git a/library/stdarch/crates/core_arch/src/x86/rtm.rs b/library/stdarch/crates/core_arch/src/x86/rtm.rs index dab73cde9e36..ea1e80057d3b 100644 --- a/library/stdarch/crates/core_arch/src/x86/rtm.rs +++ b/library/stdarch/crates/core_arch/src/x86/rtm.rs @@ -79,7 +79,7 @@ pub unsafe fn _xend() { #[cfg_attr(test, assert_instr(xabort, IMM8 = 0x0))] #[rustc_legacy_const_generics(0)] pub unsafe fn _xabort() { - static_assert_imm_u8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); x86_xabort(IMM8 as i8) } diff 
--git a/library/stdarch/crates/core_arch/src/x86/sha.rs b/library/stdarch/crates/core_arch/src/x86/sha.rs index 6f7c47b41f5a..5c5e81ba90ec 100644 --- a/library/stdarch/crates/core_arch/src/x86/sha.rs +++ b/library/stdarch/crates/core_arch/src/x86/sha.rs @@ -76,7 +76,7 @@ pub unsafe fn _mm_sha1nexte_epu32(a: __m128i, b: __m128i) -> __m128i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_sha1rnds4_epu32(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm2!(FUNC); + static_assert_uimm_bits!(FUNC, 2); transmute(sha1rnds4(a.as_i32x4(), b.as_i32x4(), FUNC as i8)) } diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index bdd0fb37b002..7e4b352df6ed 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -1010,7 +1010,7 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_shuffle_ps(a: __m128, b: __m128) -> __m128 { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!( a, b, diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs index 3c441abac554..e118ac05f8cc 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse2.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs @@ -423,7 +423,7 @@ pub unsafe fn _mm_subs_epu16(a: __m128i, b: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_slli_si128(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); _mm_slli_si128_impl::(a) } @@ -474,7 +474,7 @@ unsafe fn _mm_slli_si128_impl(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_bslli_si128(a: __m128i) -> __m128i 
{ - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); _mm_slli_si128_impl::(a) } @@ -487,7 +487,7 @@ pub unsafe fn _mm_bslli_si128(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_bsrli_si128(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); _mm_srli_si128_impl::(a) } @@ -500,7 +500,7 @@ pub unsafe fn _mm_bsrli_si128(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_slli_epi16(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pslliw(a.as_i16x8(), IMM8)) } @@ -525,7 +525,7 @@ pub unsafe fn _mm_sll_epi16(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_slli_epi32(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psllid(a.as_i32x4(), IMM8)) } @@ -550,7 +550,7 @@ pub unsafe fn _mm_sll_epi32(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_slli_epi64(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pslliq(a.as_i64x2(), IMM8)) } @@ -576,7 +576,7 @@ pub unsafe fn _mm_sll_epi64(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_srai_epi16(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psraiw(a.as_i16x8(), IMM8)) } @@ -602,7 +602,7 @@ pub unsafe fn _mm_sra_epi16(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_srai_epi32(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + 
static_assert_uimm_bits!(IMM8, 8); transmute(psraid(a.as_i32x4(), IMM8)) } @@ -627,7 +627,7 @@ pub unsafe fn _mm_sra_epi32(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_srli_si128(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); _mm_srli_si128_impl::(a) } @@ -679,7 +679,7 @@ unsafe fn _mm_srli_si128_impl(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_srli_epi16(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psrliw(a.as_i16x8(), IMM8)) } @@ -705,7 +705,7 @@ pub unsafe fn _mm_srl_epi16(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_srli_epi32(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psrlid(a.as_i32x4(), IMM8)) } @@ -731,7 +731,7 @@ pub unsafe fn _mm_srl_epi32(a: __m128i, count: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_srli_epi64(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(psrliq(a.as_i64x2(), IMM8)) } @@ -1361,7 +1361,7 @@ pub unsafe fn _mm_packus_epi16(a: __m128i, b: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_extract_epi16(a: __m128i) -> i32 { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); simd_extract::<_, u16>(a.as_u16x8(), IMM8 as u32) as i32 } @@ -1374,7 +1374,7 @@ pub unsafe fn _mm_extract_epi16(a: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_insert_epi16(a: __m128i, i: i32) -> __m128i { - static_assert_imm3!(IMM8); + 
static_assert_uimm_bits!(IMM8, 3); transmute(simd_insert(a.as_i16x8(), IMM8 as u32, i as i16)) } @@ -1400,7 +1400,7 @@ pub unsafe fn _mm_movemask_epi8(a: __m128i) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_shuffle_epi32(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); let x: i32x4 = simd_shuffle!( a, @@ -1428,7 +1428,7 @@ pub unsafe fn _mm_shuffle_epi32(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_shufflehi_epi16(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x8(); let x: i16x8 = simd_shuffle!( a, @@ -1460,7 +1460,7 @@ pub unsafe fn _mm_shufflehi_epi16(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_shufflelo_epi16(a: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x8(); let x: i16x8 = simd_shuffle!( a, @@ -2656,7 +2656,7 @@ pub unsafe fn _mm_loadu_pd(mem_addr: *const f64) -> __m128d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_shuffle_pd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm8!(MASK); + static_assert_uimm_bits!(MASK, 8); simd_shuffle!(a, b, [MASK as u32 & 0b1, ((MASK as u32 >> 1) & 0b1) + 2]) } diff --git a/library/stdarch/crates/core_arch/src/x86/sse41.rs b/library/stdarch/crates/core_arch/src/x86/sse41.rs index 173bb49d5cec..7ba86e5f79ae 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse41.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse41.rs @@ -81,7 +81,7 @@ pub unsafe fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_blend_epi16(a: __m128i, b: __m128i) 
-> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pblendw(a.as_i16x8(), b.as_i16x8(), IMM8 as u8)) } @@ -122,7 +122,7 @@ pub unsafe fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_blend_pd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm2!(IMM2); + static_assert_uimm_bits!(IMM2, 2); blendpd(a, b, IMM2 as u8) } @@ -136,7 +136,7 @@ pub unsafe fn _mm_blend_pd(a: __m128d, b: __m128d) -> __m128d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_blend_ps(a: __m128, b: __m128) -> __m128 { - static_assert_imm4!(IMM4); + static_assert_uimm_bits!(IMM4, 4); blendps(a, b, IMM4 as u8) } @@ -174,7 +174,7 @@ pub unsafe fn _mm_blend_ps(a: __m128, b: __m128) -> __m128 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_extract_ps(a: __m128) -> i32 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); transmute(simd_extract::<_, f32>(a, IMM8 as u32)) } @@ -190,7 +190,7 @@ pub unsafe fn _mm_extract_ps(a: __m128) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_extract_epi8(a: __m128i) -> i32 { - static_assert_imm4!(IMM8); + static_assert_uimm_bits!(IMM8, 4); simd_extract::<_, u8>(a.as_u8x16(), IMM8 as u32) as i32 } @@ -206,7 +206,7 @@ pub unsafe fn _mm_extract_epi8(a: __m128i) -> i32 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_extract_epi32(a: __m128i) -> i32 { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); simd_extract::<_, i32>(a.as_i32x4(), IMM8 as u32) } @@ -240,7 +240,7 @@ pub unsafe fn _mm_extract_epi32(a: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_insert_ps(a: __m128, b: __m128) -> 
__m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); insertps(a, b, IMM8 as u8) } @@ -254,7 +254,7 @@ pub unsafe fn _mm_insert_ps(a: __m128, b: __m128) -> __m128 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_insert_epi8(a: __m128i, i: i32) -> __m128i { - static_assert_imm4!(IMM8); + static_assert_uimm_bits!(IMM8, 4); transmute(simd_insert(a.as_i8x16(), IMM8 as u32, i as i8)) } @@ -268,7 +268,7 @@ pub unsafe fn _mm_insert_epi8(a: __m128i, i: i32) -> __m128i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_insert_epi32(a: __m128i, i: i32) -> __m128i { - static_assert_imm2!(IMM8); + static_assert_uimm_bits!(IMM8, 2); transmute(simd_insert(a.as_i32x4(), IMM8 as u32, i)) } @@ -582,7 +582,7 @@ pub unsafe fn _mm_cvtepu32_epi64(a: __m128i) -> __m128i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_dp_pd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); dppd(a, b, IMM8 as u8) } @@ -601,7 +601,7 @@ pub unsafe fn _mm_dp_pd(a: __m128d, b: __m128d) -> __m128d { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_dp_ps(a: __m128, b: __m128) -> __m128 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); dpps(a, b, IMM8 as u8) } @@ -754,7 +754,7 @@ pub unsafe fn _mm_ceil_ss(a: __m128, b: __m128) -> __m128 { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_round_pd(a: __m128d) -> __m128d { - static_assert_imm4!(ROUNDING); + static_assert_uimm_bits!(ROUNDING, 4); roundpd(a, ROUNDING) } @@ -795,7 +795,7 @@ pub unsafe fn _mm_round_pd(a: __m128d) -> __m128d { #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_round_ps(a: __m128) -> __m128 { - static_assert_imm4!(ROUNDING); + 
static_assert_uimm_bits!(ROUNDING, 4); roundps(a, ROUNDING) } @@ -838,7 +838,7 @@ pub unsafe fn _mm_round_ps(a: __m128) -> __m128 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_round_sd(a: __m128d, b: __m128d) -> __m128d { - static_assert_imm4!(ROUNDING); + static_assert_uimm_bits!(ROUNDING, 4); roundsd(a, b, ROUNDING) } @@ -881,7 +881,7 @@ pub unsafe fn _mm_round_sd(a: __m128d, b: __m128d) -> __m12 #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_round_ss(a: __m128, b: __m128) -> __m128 { - static_assert_imm4!(ROUNDING); + static_assert_uimm_bits!(ROUNDING, 4); roundss(a, b, ROUNDING) } @@ -981,7 +981,7 @@ pub unsafe fn _mm_mullo_epi32(a: __m128i, b: __m128i) -> __m128i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_mpsadbw_epu8(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm3!(IMM8); + static_assert_uimm_bits!(IMM8, 3); transmute(mpsadbw(a.as_u8x16(), b.as_u8x16(), IMM8 as u8)) } diff --git a/library/stdarch/crates/core_arch/src/x86/sse42.rs b/library/stdarch/crates/core_arch/src/x86/sse42.rs index 5e2067d3060b..76a6a40757a3 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse42.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse42.rs @@ -74,7 +74,7 @@ pub const _SIDD_UNIT_MASK: i32 = 0b0100_0000; #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistrm(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pcmpistrm128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)) } @@ -262,7 +262,7 @@ pub unsafe fn _mm_cmpistrm(a: __m128i, b: __m128i) -> __m128i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistri(a: __m128i, b: __m128i) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); 
pcmpistri128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8) } @@ -277,7 +277,7 @@ pub unsafe fn _mm_cmpistri(a: __m128i, b: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistrz(a: __m128i, b: __m128i) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpistriz128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8) } @@ -292,7 +292,7 @@ pub unsafe fn _mm_cmpistrz(a: __m128i, b: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistrc(a: __m128i, b: __m128i) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpistric128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8) } @@ -307,7 +307,7 @@ pub unsafe fn _mm_cmpistrc(a: __m128i, b: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistrs(a: __m128i, b: __m128i) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpistris128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8) } @@ -321,7 +321,7 @@ pub unsafe fn _mm_cmpistrs(a: __m128i, b: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistro(a: __m128i, b: __m128i) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpistrio128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8) } @@ -336,7 +336,7 @@ pub unsafe fn _mm_cmpistro(a: __m128i, b: __m128i) -> i32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpistra(a: __m128i, b: __m128i) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpistria128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8) } @@ -350,7 +350,7 @@ pub unsafe fn _mm_cmpistra(a: __m128i, b: __m128i) -> i32 { #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestrm(a: __m128i, la: i32, b: 
__m128i, lb: i32) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); transmute(pcmpestrm128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)) } @@ -439,7 +439,7 @@ pub unsafe fn _mm_cmpestrm(a: __m128i, la: i32, b: __m128i, lb: #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestri(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpestri128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8) } @@ -454,7 +454,7 @@ pub unsafe fn _mm_cmpestri(a: __m128i, la: i32, b: __m128i, lb: #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestrz(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpestriz128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8) } @@ -469,7 +469,7 @@ pub unsafe fn _mm_cmpestrz(a: __m128i, la: i32, b: __m128i, lb: #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestrc(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpestric128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8) } @@ -484,7 +484,7 @@ pub unsafe fn _mm_cmpestrc(a: __m128i, la: i32, b: __m128i, lb: #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestrs(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpestris128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8) } @@ -499,7 +499,7 @@ pub unsafe fn _mm_cmpestrs(a: __m128i, la: i32, b: __m128i, lb: #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestro(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); 
pcmpestrio128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8) } @@ -515,7 +515,7 @@ pub unsafe fn _mm_cmpestro(a: __m128i, la: i32, b: __m128i, lb: #[rustc_legacy_const_generics(4)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmpestra(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pcmpestria128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8) } diff --git a/library/stdarch/crates/core_arch/src/x86/ssse3.rs b/library/stdarch/crates/core_arch/src/x86/ssse3.rs index 8d5ada2a64c5..bdc6836ac8b0 100644 --- a/library/stdarch/crates/core_arch/src/x86/ssse3.rs +++ b/library/stdarch/crates/core_arch/src/x86/ssse3.rs @@ -90,7 +90,7 @@ pub unsafe fn _mm_shuffle_epi8(a: __m128i, b: __m128i) -> __m128i { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_alignr_epi8(a: __m128i, b: __m128i) -> __m128i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); // If palignr is shifting the pair of vectors more than the size of two // lanes, emit zero. 
if IMM8 > 32 { diff --git a/library/stdarch/crates/core_arch/src/x86/test.rs b/library/stdarch/crates/core_arch/src/x86/test.rs index bab89e61ac3e..ec4298033b7a 100644 --- a/library/stdarch/crates/core_arch/src/x86/test.rs +++ b/library/stdarch/crates/core_arch/src/x86/test.rs @@ -94,7 +94,7 @@ mod x86_polyfill { #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_insert_epi64(a: __m128i, val: i64) -> __m128i { - static_assert_imm1!(INDEX); + static_assert_uimm_bits!(INDEX, 1); #[repr(C)] union A { a: __m128i, @@ -108,7 +108,7 @@ mod x86_polyfill { #[target_feature(enable = "avx2")] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_insert_epi64(a: __m256i, val: i64) -> __m256i { - static_assert_imm2!(INDEX); + static_assert_uimm_bits!(INDEX, 2); #[repr(C)] union A { a: __m256i, diff --git a/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs index 65cfebd291c2..7a9769fb201a 100644 --- a/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs +++ b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs @@ -37,7 +37,7 @@ extern "C" { #[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_clmulepi64_epi128(a: __m512i, b: __m512i) -> __m512i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pclmulqdq_512(a, b, IMM8 as u8) } @@ -54,7 +54,7 @@ pub unsafe fn _mm512_clmulepi64_epi128(a: __m512i, b: __m512i) #[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_clmulepi64_epi128(a: __m256i, b: __m256i) -> __m256i { - static_assert_imm8!(IMM8); + static_assert_uimm_bits!(IMM8, 8); pclmulqdq_256(a, b, IMM8 as u8) } diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx.rs b/library/stdarch/crates/core_arch/src/x86_64/avx.rs index 27b18f741c6f..f699f6164802 100644 --- a/library/stdarch/crates/core_arch/src/x86_64/avx.rs +++ b/library/stdarch/crates/core_arch/src/x86_64/avx.rs @@ -28,7 
+28,7 @@ use crate::{ // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi64(a: __m256i, i: i64) -> __m256i { - static_assert_imm2!(INDEX); + static_assert_uimm_bits!(INDEX, 2); transmute(simd_insert(a.as_i64x4(), INDEX as u32, i)) } diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx2.rs b/library/stdarch/crates/core_arch/src/x86_64/avx2.rs index 17827c0b3f60..3388568eb42e 100644 --- a/library/stdarch/crates/core_arch/src/x86_64/avx2.rs +++ b/library/stdarch/crates/core_arch/src/x86_64/avx2.rs @@ -29,7 +29,7 @@ use crate::core_arch::{simd_llvm::*, x86::*}; // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extract_epi64(a: __m256i) -> i64 { - static_assert_imm2!(INDEX); + static_assert_uimm_bits!(INDEX, 2); simd_extract(a.as_i64x4(), INDEX as u32) } diff --git a/library/stdarch/crates/core_arch/src/x86_64/macros.rs b/library/stdarch/crates/core_arch/src/x86_64/macros.rs index a3ea0e821631..17e1c257c75b 100644 --- a/library/stdarch/crates/core_arch/src/x86_64/macros.rs +++ b/library/stdarch/crates/core_arch/src/x86_64/macros.rs @@ -1,36 +1,22 @@ //! Utility macros. -// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is +// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is // not a round number. -pub(crate) struct ValidateConstRound; -impl ValidateConstRound { - pub(crate) const VALID: () = { - assert!( - IMM == 4 || IMM == 8 || IMM == 9 || IMM == 10 || IMM == 11, - "Invalid IMM value" - ); - }; -} - #[allow(unused)] macro_rules! 
static_assert_rounding { ($imm:ident) => { - let _ = $crate::core_arch::x86_64::macros::ValidateConstRound::<$imm>::VALID; + static_assert!( + $imm == 4 || $imm == 8 || $imm == 9 || $imm == 10 || $imm == 11, + "Invalid IMM value" + ) }; } -// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is +// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is // not a sae number. -pub(crate) struct ValidateConstSae; -impl ValidateConstSae { - pub(crate) const VALID: () = { - assert!(IMM == 4 || IMM == 8, "Invalid IMM value"); - }; -} - #[allow(unused)] macro_rules! static_assert_sae { ($imm:ident) => { - let _ = $crate::core_arch::x86_64::macros::ValidateConstSae::<$imm>::VALID; + static_assert!($imm == 4 || $imm == 8, "Invalid IMM value") }; } diff --git a/library/stdarch/crates/core_arch/src/x86_64/sse41.rs b/library/stdarch/crates/core_arch/src/x86_64/sse41.rs index cdee4fdfec36..d815a69a7eed 100644 --- a/library/stdarch/crates/core_arch/src/x86_64/sse41.rs +++ b/library/stdarch/crates/core_arch/src/x86_64/sse41.rs @@ -17,7 +17,7 @@ use stdarch_test::assert_instr; #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_extract_epi64(a: __m128i) -> i64 { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); simd_extract(a.as_i64x2(), IMM1 as u32) } @@ -31,7 +31,7 @@ pub unsafe fn _mm_extract_epi64(a: __m128i) -> i64 { #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_insert_epi64(a: __m128i, i: i64) -> __m128i { - static_assert_imm1!(IMM1); + static_assert_uimm_bits!(IMM1, 1); transmute(simd_insert(a.as_i64x2(), IMM1 as u32, i)) } diff --git a/library/stdarch/crates/stdarch-gen/src/main.rs b/library/stdarch/crates/stdarch-gen/src/main.rs index b91a657aab66..652aee88c893 100644 --- a/library/stdarch/crates/stdarch-gen/src/main.rs +++ b/library/stdarch/crates/stdarch-gen/src/main.rs 
@@ -2844,12 +2844,9 @@ fn get_call( _ => 0, }; if len == 0 { - return format!( - r#"static_assert!({} : i32 where {} == 0);"#, - fn_format[2], fn_format[2] - ); + return format!(r#"static_assert!({} == 0);"#, fn_format[2]); } else { - return format!(r#"static_assert_imm{len}!({});"#, fn_format[2]); + return format!(r#"static_assert_uimm_bits!({}, {len});"#, fn_format[2]); } } if fn_name.starts_with("static_assert") { @@ -2869,14 +2866,11 @@ fn get_call( fn_format[3].clone() }; if lim1 == lim2 { - return format!( - r#"static_assert!({} : i32 where {} == {lim1});"#, - fn_format[1], fn_format[1] - ); + return format!(r#"static_assert!({} == {lim1});"#, fn_format[1]); } else { return format!( - r#"static_assert!({} : i32 where {} >= {lim1} && {} <= {lim2});"#, - fn_format[1], fn_format[1], fn_format[1] + r#"static_assert!({} >= {lim1} && {} <= {lim2});"#, + fn_format[1], fn_format[1] ); } }