From 7ca45bebaa0d88a0a43e16a9a81193b1aac3281d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9my=20Rakic?=
Date: Sat, 6 Mar 2021 01:48:28 +0100
Subject: [PATCH] convert `_mm256_mask_i64gather_pd` to const generics

---
 .../stdarch/crates/core_arch/src/x86/avx2.rs | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index 2f0a15468468..eebcc36b453b 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -1797,24 +1797,19 @@ pub unsafe fn _mm256_i64gather_pd(
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i64gather_pd)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vgatherqpd, scale = 1))]
-#[rustc_args_required_const(4)]
+#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
+#[rustc_legacy_const_generics(4)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_mask_i64gather_pd(
+pub unsafe fn _mm256_mask_i64gather_pd<const SCALE: i32>(
     src: __m256d,
     slice: *const f64,
     offsets: __m256i,
     mask: __m256d,
-    scale: i32,
 ) -> __m256d {
+    static_assert_imm8_scale!(SCALE);
     let slice = slice as *const i8;
     let offsets = offsets.as_i64x4();
-    macro_rules! call {
-        ($imm8:expr) => {
-            vpgatherqpd(src, slice, offsets, mask, $imm8)
-        };
-    }
-    constify_imm8_gather!(scale, call)
+    vpgatherqpd(src, slice, offsets, mask, SCALE as i8)
 }
 
 /// Copies `a` to `dst`, then insert 128 bits (of integer data) from `b` at the
@@ -5917,12 +5912,11 @@ mod tests {
             j += 1.0;
         }
         // A multiplier of 8 is word-addressing for f64s
-        let r = _mm256_mask_i64gather_pd(
+        let r = _mm256_mask_i64gather_pd::<8>(
             _mm256_set1_pd(256.0),
             arr.as_ptr(),
             _mm256_setr_epi64x(0, 16, 64, 96),
             _mm256_setr_pd(-1.0, -1.0, -1.0, 0.0),
-            8,
         );
         assert_eq_m256d(r, _mm256_setr_pd(0.0, 16.0, 64.0, 256.0));
     }