From 80bb4ab06b13ac883cd986de73a0b07188bb0a53 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9my=20Rakic?= <remy.rakic+github@gmail.com>
Date: Sat, 6 Mar 2021 01:37:49 +0100
Subject: [PATCH] convert `_mm256_i64gather_ps` to const generics

---
 library/stdarch/crates/core_arch/src/x86/avx2.rs | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index 86353cea95c1..e8ef7f68834b 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -1592,20 +1592,16 @@ pub unsafe fn _mm_mask_i64gather_ps(
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i64gather_ps)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vgatherqps, scale = 1))]
-#[rustc_args_required_const(2)]
+#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_i64gather_ps(slice: *const f32, offsets: __m256i, scale: i32) -> __m128 {
+pub unsafe fn _mm256_i64gather_ps<const SCALE: i32>(slice: *const f32, offsets: __m256i) -> __m128 {
+    static_assert_imm8_scale!(SCALE);
     let zero = _mm_setzero_ps();
     let neg_one = _mm_set1_ps(-1.0);
     let offsets = offsets.as_i64x4();
     let slice = slice as *const i8;
-    macro_rules! call {
-        ($imm8:expr) => {
-            vpgatherqps(zero, slice, offsets, neg_one, $imm8)
-        };
-    }
-    constify_imm8_gather!(scale, call)
+    vpgatherqps(zero, slice, offsets, neg_one, SCALE as i8)
 }
 
 /// Returns values from `slice` at offsets determined by `offsets * scale`,
@@ -5819,7 +5815,7 @@ mod tests {
             j += 1.0;
         }
         // A multiplier of 4 is word-addressing for f32s
-        let r = _mm256_i64gather_ps(arr.as_ptr(), _mm256_setr_epi64x(0, 16, 32, 48), 4);
+        let r = _mm256_i64gather_ps::<4>(arr.as_ptr(), _mm256_setr_epi64x(0, 16, 32, 48));
         assert_eq_m128(r, _mm_setr_ps(0.0, 16.0, 32.0, 48.0));
     }