diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index fc957f54a9e5..c40a0db161a2 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -7393,15 +7393,11 @@ pub unsafe fn _mm256_mask_shufflehi_epi16(
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflehi_epi16&expand=5208)
 #[inline]
 #[target_feature(enable = "avx512bw,avx512vl")]
-#[cfg_attr(test, assert_instr(vpshufhw, imm8 = 5))]
-#[rustc_args_required_const(2)]
-pub unsafe fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m256i, imm8: i32) -> __m256i {
-    macro_rules! call {
-        ($imm8:expr) => {
-            _mm256_shufflehi_epi16(a, $imm8)
-        };
-    }
-    let shuffle = constify_imm8_sae!(imm8, call);
+#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
+#[rustc_legacy_const_generics(2)]
+pub unsafe fn _mm256_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    let shuffle = _mm256_shufflehi_epi16::<IMM8>(a);
     let zero = _mm256_setzero_si256().as_i16x16();
     transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero))
 }
@@ -16435,9 +16431,9 @@ mod tests {
     #[simd_test(enable = "avx512bw,avx512vl")]
     unsafe fn test_mm256_maskz_shufflehi_epi16() {
         let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-        let r = _mm256_maskz_shufflehi_epi16(0, a, 0b00_01_01_11);
+        let r = _mm256_maskz_shufflehi_epi16::<0b00_01_01_11>(0, a);
         assert_eq_m256i(r, _mm256_setzero_si256());
-        let r = _mm256_maskz_shufflehi_epi16(0b11111111_11111111, a, 0b00_01_01_11);
+        let r = _mm256_maskz_shufflehi_epi16::<0b00_01_01_11>(0b11111111_11111111, a);
         let e = _mm256_set_epi16(3, 2, 2, 0, 4, 5, 6, 7, 11, 10, 10, 8, 12, 13, 14, 15);
         assert_eq_m256i(r, e);
     }