From 54d690dc70a255e2f1aa43c182abe832b53c95ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Horstmann?= Date: Sat, 4 Dec 2021 14:03:40 +0100 Subject: [PATCH] Implement avx512 masked load and store intrinsics (#1254) --- library/stdarch/crates/core_arch/avx512f.md | 146 +- .../crates/core_arch/src/x86/avx512bw.rs | 602 +++++ .../crates/core_arch/src/x86/avx512f.rs | 2261 +++++++++++++++++ 3 files changed, 2936 insertions(+), 73 deletions(-) diff --git a/library/stdarch/crates/core_arch/avx512f.md b/library/stdarch/crates/core_arch/avx512f.md index 1ad80147cff7..9d95f0c492ca 100644 --- a/library/stdarch/crates/core_arch/avx512f.md +++ b/library/stdarch/crates/core_arch/avx512f.md @@ -1784,113 +1784,113 @@ * [x] [`_mm512_setzero_si512`] * [x] [`_mm512_setzero`] * [x] [`_mm512_load_epi32`] - * [ ] [`_mm512_mask_load_epi32`] //need i1 - * [ ] [`_mm512_maskz_load_epi32`] //need i1 + * [x] [`_mm512_mask_load_epi32`] //need i1 + * [x] [`_mm512_maskz_load_epi32`] //need i1 * [x] [`_mm_load_epi32`] - * [_] [`_mm_mask_load_epi32`] //need i1 - * [_] [`_mm_maskz_load_epi32`] //need i1 + * [x] [`_mm_mask_load_epi32`] //need i1 + * [x] [`_mm_maskz_load_epi32`] //need i1 * [x] [`_mm256_load_epi32`] - * [_] [`_mm256_mask_load_epi32`] //need i1 - * [_] [`_mm256_maskz_load_epi32`] //need i1 + * [x] [`_mm256_mask_load_epi32`] //need i1 + * [x] [`_mm256_maskz_load_epi32`] //need i1 * [x] [`_mm512_load_epi64`] - * [ ] [`_mm512_mask_load_epi64`] //need i1 - * [ ] [`_mm512_maskz_load_epi64`] //need i1 + * [x] [`_mm512_mask_load_epi64`] //need i1 + * [x] [`_mm512_maskz_load_epi64`] //need i1 * [x] [`_mm_load_epi64`] //need i1 - * [_] [`_mm_mask_load_epi64`] //need i1 - * [_] [`_mm_maskz_load_epi64`] //need i1 + * [x] [`_mm_mask_load_epi64`] //need i1 + * [x] [`_mm_maskz_load_epi64`] //need i1 * [x] [`_mm256_load_epi64`] //need i1 - * [_] [`_mm256_mask_load_epi64`] //need i1 - * [_] [`_mm256_maskz_load_epi64`] //need i1 + * [x] [`_mm256_mask_load_epi64`] //need i1 + * [x] [`_mm256_maskz_load_epi64`] //need i1 * [x] [`_mm512_load_ps`] - * [ ] [`_mm512_mask_load_ps`] //need i1 - * [ ] [`_mm512_maskz_load_ps`] //need i1 - * [_] [`_mm_maskz_load_ps`] //need i - * [_] [`_mm_mask_load_ps`] //need i1 - * [_] [`_mm_maskz_load_ps`] //need i1 - * [_] [`_mm256_mask_load_ps`] //need i1 - * [_] [`_mm256_maskz_load_ps`] //need i1 + * [x] [`_mm512_mask_load_ps`] //need i1 + * [x] [`_mm512_maskz_load_ps`] //need i1 + * [x] [`_mm_maskz_load_ps`] //need i + * [x] [`_mm_mask_load_ps`] //need i1 + * [x] [`_mm_maskz_load_ps`] //need i1 + * [x] [`_mm256_mask_load_ps`] //need i1 + * [x] [`_mm256_maskz_load_ps`] //need i1 * [x] [`_mm512_load_pd`] - * [ ] [`_mm512_mask_load_pd`] //need i1 - * [ ] [`_mm512_maskz_load_pd`] //need i1 - * [_] [`_mm_mask_load_pd`] //need i1 - * [_] [`_mm_maskz_load_pd`] //need i1 - * [_] [`_mm256_mask_load_pd`] //need i1 - * [_] [`_mm256_maskz_load_pd`] //need i1 + * [x] [`_mm512_mask_load_pd`] //need i1 + * [x] [`_mm512_maskz_load_pd`] //need i1 + * [x] [`_mm_mask_load_pd`] //need i1 + * [x] [`_mm_maskz_load_pd`] //need i1 + * [x] [`_mm256_mask_load_pd`] //need i1 + * [x] [`_mm256_maskz_load_pd`] //need i1 * [x] [`_mm512_load_si512`] * [x] [`_mm512_loadu_epi32`] - * [ ] [`_mm512_mask_loadu_epi32`] //need i1 + * [x] [`_mm512_mask_loadu_epi32`] //need i1 * [x] [`_mm_loadu_epi32`] - * [_] [`_mm_mask_loadu_epi32`] //need i1 - * [_] [`_mm_maskz_loadu_epi32`] //need i1 - * [ ] [`_mm512_maskz_loadu_epi32`] //need i1 + * [x] [`_mm_mask_loadu_epi32`] //need i1 + * [x] [`_mm_maskz_loadu_epi32`] //need i1 + * [x] 
[`_mm512_maskz_loadu_epi32`] //need i1 * [x] [`_mm256_loadu_epi32`] - * [_] [`_mm256_mask_loadu_epi32`] //need i1 - * [_] [`_mm256_maskz_loadu_epi32`] //need i1 + * [x] [`_mm256_mask_loadu_epi32`] //need i1 + * [x] [`_mm256_maskz_loadu_epi32`] //need i1 * [x] [`_mm512_loadu_epi64`] - * [ ] [`_mm512_mask_loadu_epi64`] //need i1 - * [ ] [`_mm512_maskz_loadu_epi64`] //need i1 + * [x] [`_mm512_mask_loadu_epi64`] //need i1 + * [x] [`_mm512_maskz_loadu_epi64`] //need i1 * [x] [`_mm_loadu_epi64`] - * [_] [`_mm_mask_loadu_epi64`] //need i1 - * [_] [`_mm_maskz_loadu_epi64`] //need i1 + * [x] [`_mm_mask_loadu_epi64`] //need i1 + * [x] [`_mm_maskz_loadu_epi64`] //need i1 * [x] [`_mm256_loadu_epi64`] - * [_] [`_mm256_mask_loadu_epi64`] //need i1 - * [_] [`_mm256_maskz_loadu_epi64`] //need i1 + * [x] [`_mm256_mask_loadu_epi64`] //need i1 + * [x] [`_mm256_maskz_loadu_epi64`] //need i1 * [x] [`_mm512_loadu_ps`] - * [ ] [`_mm512_mask_loadu_ps`] //need i1 - * [ ] [`_mm512_maskz_loadu_ps`] //need i1 - * [_] [`_mm_mask_loadu_ps`] //need i1 - * [_] [`_mm_maskz_loadu_ps`] //need i1 - * [_] [`_mm256_mask_loadu_ps`] //need i1 - * [_] [`_mm256_maskz_loadu_ps`] //need i1 + * [x] [`_mm512_mask_loadu_ps`] //need i1 + * [x] [`_mm512_maskz_loadu_ps`] //need i1 + * [x] [`_mm_mask_loadu_ps`] //need i1 + * [x] [`_mm_maskz_loadu_ps`] //need i1 + * [x] [`_mm256_mask_loadu_ps`] //need i1 + * [x] [`_mm256_maskz_loadu_ps`] //need i1 * [x] [`_mm512_loadu_pd`] - * [ ] [`_mm512_mask_loadu_pd`] //need i1 - * [ ] [`_mm512_maskz_loadu_pd`] //need i1 - * [_] [`_mm_mask_loadu_pd`] //need i1 - * [_] [`_mm_maskz_loadu_pd`] //need i1 - * [_] [`_mm256_mask_loadu_pd`] //need i1 - * [_] [`_mm256_maskz_loadu_pd`] //need i1 + * [x] [`_mm512_mask_loadu_pd`] //need i1 + * [x] [`_mm512_maskz_loadu_pd`] //need i1 + * [x] [`_mm_mask_loadu_pd`] //need i1 + * [x] [`_mm_maskz_loadu_pd`] //need i1 + * [x] [`_mm256_mask_loadu_pd`] //need i1 + * [x] [`_mm256_maskz_loadu_pd`] //need i1 * [x] [`_mm512_loadu_si512`] * [x] [`_mm512_store_epi32`] - * [ ] [`_mm512_mask_store_epi32`] //need i1 - * [_] [`_mm_mask_store_epi32`] //need i1 + * [x] [`_mm512_mask_store_epi32`] //need i1 + * [x] [`_mm_mask_store_epi32`] //need i1 * [x] [`_mm_store_epi32`] - * [_] [`_mm256_mask_store_epi32`] //need i1 + * [x] [`_mm256_mask_store_epi32`] //need i1 * [x] [`_mm256_store_epi32`] * [x] [`_mm512_store_epi64`] - * [ ] [`_mm512_mask_store_epi64`] //need i1 - * [_] [`_mm_mask_store_epi64`] //need i1 + * [x] [`_mm512_mask_store_epi64`] //need i1 + * [x] [`_mm_mask_store_epi64`] //need i1 * [x] [`_mm_store_epi64`] - * [_] [`_mm256_mask_store_epi64`] //need i1 + * [x] [`_mm256_mask_store_epi64`] //need i1 * [x] [`_mm256_store_epi64`] * [x] [`_mm512_store_ps`] - * [ ] [`_mm512_mask_store_ps`] //need i1 - * [_] [`_mm_mask_store_ps`] //need i1 - * [_] [`_mm256_mask_store_ps`] //need i1 + * [x] [`_mm512_mask_store_ps`] //need i1 + * [x] [`_mm_mask_store_ps`] //need i1 + * [x] [`_mm256_mask_store_ps`] //need i1 * [x] [`_mm512_store_pd`] - * [ ] [`_mm512_mask_store_pd`] //need i1 - * [_] [`_mm_mask_store_pd`] //need i1 - * [_] [`_mm256_mask_store_pd`] //need i1 + * [x] [`_mm512_mask_store_pd`] //need i1 + * [x] [`_mm_mask_store_pd`] //need i1 + * [x] [`_mm256_mask_store_pd`] //need i1 * [x] [`_mm512_store_si512`] * [x] [`_mm512_storeu_epi32`] - * [ ] [`_mm512_mask_storeu_epi32`] //need i1 - * [_] [`_mm_mask_storeu_epi32`] //need i1 + * [x] [`_mm512_mask_storeu_epi32`] //need i1 + * [x] [`_mm_mask_storeu_epi32`] //need i1 * [x] [`_mm_storeu_epi32`] - * [_] [`_mm256_mask_storeu_epi32`] 
//need i1 + * [x] [`_mm256_mask_storeu_epi32`] //need i1 * [x] [`_mm256_storeu_epi32`] * [x] [`_mm512_storeu_epi64`] - * [ ] [`_mm512_mask_storeu_epi64`] //need i1 - * [_] [`_mm_mask_storeu_epi64`] //need i1 + * [x] [`_mm512_mask_storeu_epi64`] //need i1 + * [x] [`_mm_mask_storeu_epi64`] //need i1 * [x] [`_mm_storeu_epi64`] - * [_] [`_mm256_mask_storeu_epi64`] //need i1 + * [x] [`_mm256_mask_storeu_epi64`] //need i1 * [x] [`_mm256_storeu_epi64`] * [x] [`_mm512_storeu_ps`] - * [ ] [`_mm512_mask_storeu_ps`] //need i1 - * [_] [`_mm_mask_storeu_ps`] //need i1 - * [_] [`_mm256_mask_storeu_ps`] //need i1 + * [x] [`_mm512_mask_storeu_ps`] //need i1 + * [x] [`_mm_mask_storeu_ps`] //need i1 + * [x] [`_mm256_mask_storeu_ps`] //need i1 * [x] [`_mm512_storeu_pd`] - * [ ] [`_mm512_mask_storeu_pd`] //need i1 - * [_] [`_mm_mask_storeu_pd`] //need i1 - * [_] [`_mm256_mask_storeu_pd`] //need i1 + * [x] [`_mm512_mask_storeu_pd`] //need i1 + * [x] [`_mm_mask_storeu_pd`] //need i1 + * [x] [`_mm256_mask_storeu_pd`] //need i1 * [x] [`_mm512_storeu_si512`] * [ ] [`_mm512_stream_load_si512`] //stream_load_si256, ... not implment yet * [x] [`_mm512_stream_pd`] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs index 10e00963390b..0363004674eb 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs @@ -4227,6 +4227,330 @@ pub unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); } +/// Load packed 16-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw")] +pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *const i16) -> __m512i { + let mut dst: __m512i = src; + asm!( + "vmovdqu16 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 16-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw")] +pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i { + let mut dst: __m512i; + asm!( + "vmovdqu16 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 8-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. 
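(Editor's note, not part of the patch: a minimal annotated restatement of the operand pattern that all of these asm!-based masked loads share. It mirrors `_mm512_mask_loadu_epi16` above; the standalone function name is hypothetical.)

use core::arch::asm;
use core::arch::x86_64::{__m512i, __mmask32};

// Sketch only: same binding scheme as the intrinsics added in this patch.
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn mask_loadu_epi16_sketch(src: __m512i, k: __mmask32, mem_addr: *const i16) -> __m512i {
    // Start from `src` so lanes whose mask bit is 0 keep their src value (merge masking).
    let mut dst: __m512i = src;
    asm!(
        // Intel syntax: write to {2} under mask {1}, reading the memory operand [{0}].
        // The maskz variants append {{z}} so masked-off lanes are zeroed instead of merged.
        "vmovdqu16 {2}{{{1}}}, [{0}]",
        in(reg) mem_addr,   // {0}: pointer in a general-purpose register
        in(kreg) k,         // {1}: write mask in an AVX-512 k-register
        inout(zmm_reg) dst, // {2}: 512-bit register, read (src) and written (result)
        options(pure, readonly, nostack)
    );
    dst
}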
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw")] +pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *const i8) -> __m512i { + let mut dst: __m512i = src; + asm!( + "vmovdqu8 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 8-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw")] +pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m512i { + let mut dst: __m512i; + asm!( + "vmovdqu8 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 16-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *const i16) -> __m256i { + let mut dst: __m256i = src; + asm!( + "vmovdqu16 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 16-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i { + let mut dst: __m256i; + asm!( + "vmovdqu16 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 8-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *const i8) -> __m256i { + let mut dst: __m256i = src; + asm!( + "vmovdqu8 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 8-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i { + let mut dst: __m256i; + asm!( + "vmovdqu8 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 16-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i16) -> __m128i { + let mut dst: __m128i = src; + asm!( + "vmovdqu16 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 16-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i { + let mut dst: __m128i; + asm!( + "vmovdqu16 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 8-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i8) -> __m128i { + let mut dst: __m128i = src; + asm!( + "vmovdqu8 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 8-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i { + let mut dst: __m128i; + asm!( + "vmovdqu8 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Store packed 16-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw")] +pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: __m512i) { + asm!( + "vmovdqu16 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed 8-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw")] +pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m512i) { + asm!( + "vmovdqu8 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed 16-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: __m256i) { + asm!( + "vmovdqu16 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed 8-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +pub unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m256i) { + asm!( + "vmovdqu8 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed 16-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi16) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m128i) { + asm!( + "vmovdqu16 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed 8-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi8) +#[inline] +#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128i) { + asm!( + "vmovdqu8 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + /// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst. 
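(Editor's note, usage illustration only and not part of the patch: the unaligned masked load/store pair added above lets a caller copy a partial vector without reading or writing past the end of a buffer. `copy_tail_epi16` and its parameters are hypothetical names.)

use core::arch::x86_64::*;

// Copies at most 32 i16 elements; lanes at index >= len are neither loaded nor stored.
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn copy_tail_epi16(dst: *mut i16, src: *const i16, len: usize) {
    debug_assert!(len <= 32);
    // Mask with the low `len` bits set (all bits when len == 32).
    let k: __mmask32 = if len >= 32 { !0 } else { (1u32 << len) - 1 };
    let v = _mm512_maskz_loadu_epi16(k, src); // masked-off lanes come back as zero
    _mm512_mask_storeu_epi16(dst, k, v);      // masked-off lanes are left untouched in dst
}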
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_madd_epi16&expand=3511) @@ -13826,6 +14150,284 @@ mod tests { assert_eq_m128i(r, a); } + #[simd_test(enable = "avx512f,avx512bw")] + unsafe fn test_mm512_mask_loadu_epi16() { + let src = _mm512_set1_epi16(42); + let a = &[ + 1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]; + let p = a.as_ptr(); + let m = 0b10101010_11001100_11101000_11001010; + let r = _mm512_mask_loadu_epi16(src, m, black_box(p)); + let e = &[ + 42_i16, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, 42, 42, 19, 20, 42, 42, + 23, 24, 42, 26, 42, 28, 42, 30, 42, 32, + ]; + let e = _mm512_loadu_epi16(e.as_ptr()); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw")] + unsafe fn test_mm512_maskz_loadu_epi16() { + let a = &[ + 1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]; + let p = a.as_ptr(); + let m = 0b10101010_11001100_11101000_11001010; + let r = _mm512_maskz_loadu_epi16(m, black_box(p)); + let e = &[ + 0_i16, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16, 0, 0, 19, 20, 0, 0, 23, 24, 0, + 26, 0, 28, 0, 30, 0, 32, + ]; + let e = _mm512_loadu_epi16(e.as_ptr()); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw")] + unsafe fn test_mm512_mask_storeu_epi16() { + let mut r = [42_i16; 32]; + let a = &[ + 1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]; + let a = _mm512_loadu_epi16(a.as_ptr()); + let m = 0b10101010_11001100_11101000_11001010; + _mm512_mask_storeu_epi16(r.as_mut_ptr(), m, a); + let e = &[ + 42_i16, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, 42, 42, 19, 20, 42, 42, + 23, 24, 42, 26, 42, 28, 42, 30, 42, 32, + ]; + let e = _mm512_loadu_epi16(e.as_ptr()); + assert_eq_m512i(_mm512_loadu_epi16(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512bw")] + unsafe fn test_mm512_mask_loadu_epi8() { + let src = _mm512_set1_epi8(42); + let a = &[ + 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + ]; + let p = a.as_ptr(); + let m = 0b00000000_11111111_11111111_00000000_10101010_11001100_11101000_11001010; + let r = _mm512_mask_loadu_epi8(src, m, black_box(p)); + let e = &[ + 42_i8, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, 42, 42, 19, 20, 42, 42, + 23, 24, 42, 26, 42, 28, 42, 30, 42, 32, 42, 42, 42, 42, 42, 42, 42, 42, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 42, 42, 42, 42, 42, 42, 42, 42, + ]; + let e = _mm512_loadu_epi8(e.as_ptr()); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw")] + unsafe fn test_mm512_maskz_loadu_epi8() { + let a = &[ + 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + ]; + let p = a.as_ptr(); + let m = 0b00000000_11111111_11111111_00000000_10101010_11001100_11101000_11001010; + let r = _mm512_maskz_loadu_epi8(m, black_box(p)); + let e = &[ + 0_i8, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16, 0, 0, 19, 20, 0, 0, 
23, 24, 0, + 26, 0, 28, 0, 30, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let e = _mm512_loadu_epi8(e.as_ptr()); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw")] + unsafe fn test_mm512_mask_storeu_epi8() { + let mut r = [42_i8; 64]; + let a = &[ + 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + ]; + let a = _mm512_loadu_epi8(a.as_ptr()); + let m = 0b00000000_11111111_11111111_00000000_10101010_11001100_11101000_11001010; + _mm512_mask_storeu_epi8(r.as_mut_ptr(), m, a); + let e = &[ + 42_i8, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, 42, 42, 19, 20, 42, 42, + 23, 24, 42, 26, 42, 28, 42, 30, 42, 32, 42, 42, 42, 42, 42, 42, 42, 42, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 42, 42, 42, 42, 42, 42, 42, 42, + ]; + let e = _mm512_loadu_epi8(e.as_ptr()); + assert_eq_m512i(_mm512_loadu_epi8(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_loadu_epi16() { + let src = _mm256_set1_epi16(42); + let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm256_mask_loadu_epi16(src, m, black_box(p)); + let e = &[ + 42_i16, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, + ]; + let e = _mm256_loadu_epi16(e.as_ptr()); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm256_maskz_loadu_epi16() { + let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm256_maskz_loadu_epi16(m, black_box(p)); + let e = &[0_i16, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16]; + let e = _mm256_loadu_epi16(e.as_ptr()); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_storeu_epi16() { + let mut r = [42_i16; 16]; + let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let a = _mm256_loadu_epi16(a.as_ptr()); + let m = 0b11101000_11001010; + _mm256_mask_storeu_epi16(r.as_mut_ptr(), m, a); + let e = &[ + 42_i16, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, + ]; + let e = _mm256_loadu_epi16(e.as_ptr()); + assert_eq_m256i(_mm256_loadu_epi16(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_loadu_epi8() { + let src = _mm256_set1_epi8(42); + let a = &[ + 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]; + let p = a.as_ptr(); + let m = 0b10101010_11001100_11101000_11001010; + let r = _mm256_mask_loadu_epi8(src, m, black_box(p)); + let e = &[ + 42_i8, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, 42, 42, 19, 20, 42, 42, + 23, 24, 42, 26, 42, 28, 42, 30, 42, 32, + ]; + let e = _mm256_loadu_epi8(e.as_ptr()); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm256_maskz_loadu_epi8() { + let a = &[ + 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]; + let p = a.as_ptr(); + let m = 0b10101010_11001100_11101000_11001010; + let r = _mm256_maskz_loadu_epi8(m, black_box(p)); 
+ let e = &[ + 0_i8, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16, 0, 0, 19, 20, 0, 0, 23, 24, 0, + 26, 0, 28, 0, 30, 0, 32, + ]; + let e = _mm256_loadu_epi8(e.as_ptr()); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm256_mask_storeu_epi8() { + let mut r = [42_i8; 32]; + let a = &[ + 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]; + let a = _mm256_loadu_epi8(a.as_ptr()); + let m = 0b10101010_11001100_11101000_11001010; + _mm256_mask_storeu_epi8(r.as_mut_ptr(), m, a); + let e = &[ + 42_i8, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, 42, 42, 19, 20, 42, 42, + 23, 24, 42, 26, 42, 28, 42, 30, 42, 32, + ]; + let e = _mm256_loadu_epi8(e.as_ptr()); + assert_eq_m256i(_mm256_loadu_epi8(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm_mask_loadu_epi16() { + let src = _mm_set1_epi16(42); + let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm_mask_loadu_epi16(src, m, black_box(p)); + let e = &[42_i16, 2, 42, 4, 42, 42, 7, 8]; + let e = _mm_loadu_epi16(e.as_ptr()); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm_maskz_loadu_epi16() { + let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm_maskz_loadu_epi16(m, black_box(p)); + let e = &[0_i16, 2, 0, 4, 0, 0, 7, 8]; + let e = _mm_loadu_epi16(e.as_ptr()); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm_mask_storeu_epi16() { + let mut r = [42_i16; 8]; + let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8]; + let a = _mm_loadu_epi16(a.as_ptr()); + let m = 0b11001010; + _mm_mask_storeu_epi16(r.as_mut_ptr(), m, a); + let e = &[42_i16, 2, 42, 4, 42, 42, 7, 8]; + let e = _mm_loadu_epi16(e.as_ptr()); + assert_eq_m128i(_mm_loadu_epi16(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm_mask_loadu_epi8() { + let src = _mm_set1_epi8(42); + let a = &[1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm_mask_loadu_epi8(src, m, black_box(p)); + let e = &[ + 42_i8, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, + ]; + let e = _mm_loadu_epi8(e.as_ptr()); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm_maskz_loadu_epi8() { + let a = &[1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm_maskz_loadu_epi8(m, black_box(p)); + let e = &[0_i8, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16]; + let e = _mm_loadu_epi8(e.as_ptr()); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512f,avx512bw,avx512vl")] + unsafe fn test_mm_mask_storeu_epi8() { + let mut r = [42_i8; 16]; + let a = &[1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let a = _mm_loadu_epi8(a.as_ptr()); + let m = 0b11101000_11001010; + _mm_mask_storeu_epi8(r.as_mut_ptr(), m, a); + let e = &[ + 42_i8, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16, + ]; + let e = _mm_loadu_epi8(e.as_ptr()); + assert_eq_m128i(_mm_loadu_epi8(r.as_ptr()), e); + } + #[simd_test(enable = "avx512bw")] unsafe fn test_mm512_madd_epi16() { let a = _mm512_set1_epi16(1); diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs 
index 766acf46f75d..7633442aa657 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -30323,6 +30323,1302 @@ pub unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) { ptr::write(mem_addr as *mut __m512d, a); } +/// Load packed 32-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi32) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i { + let mut dst: __m512i = src; + asm!( + "vmovdqu32 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi32) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { + let mut dst: __m512i; + asm!( + "vmovdqu32 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi64) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i { + let mut dst: __m512i = src; + asm!( + "vmovdqu64 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi64) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { + let mut dst: __m512i; + asm!( + "vmovdqu64 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. 
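(Editor's note, hedged sketch only: a typical caller-side pattern for the 32-bit variants added below is to process full 512-bit chunks with `_mm512_loadu_epi32` and finish the remainder with a zero-masked load. `sum_i32` is a hypothetical helper and assumes these intrinsics are available to the caller.)

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn sum_i32(data: &[i32]) -> i32 {
    let mut acc = _mm512_setzero_si512();
    let mut chunks = data.chunks_exact(16);
    for c in &mut chunks {
        acc = _mm512_add_epi32(acc, _mm512_loadu_epi32(c.as_ptr()));
    }
    let rem = chunks.remainder();
    if !rem.is_empty() {
        // Only the low rem.len() lanes are read from memory; the rest are zeroed.
        let k: __mmask16 = (1u16 << rem.len()) - 1;
        acc = _mm512_add_epi32(acc, _mm512_maskz_loadu_epi32(k, rem.as_ptr()));
    }
    _mm512_reduce_add_epi32(acc)
}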
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_ps) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { + let mut dst: __m512 = src; + asm!( + "vmovups {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_ps) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { + let mut dst: __m512; + asm!( + "vmovups {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_pd) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d { + let mut dst: __m512d = src; + asm!( + "vmovupd {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_pd) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { + let mut dst: __m512d; + asm!( + "vmovupd {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i { + let mut dst: __m256i = src; + asm!( + "vmovdqu32 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { + let mut dst: __m256i; + asm!( + "vmovdqu32 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i { + let mut dst: __m256i = src; + asm!( + "vmovdqu64 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { + let mut dst: __m256i; + asm!( + "vmovdqu64 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { + let mut dst: __m256 = src; + asm!( + "vmovups {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { + let mut dst: __m256; + asm!( + "vmovups {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. 
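(Editor's note, illustration only: the writemask forms merge with `src` instead of zeroing, which is useful when masked-off lanes should hold a caller-chosen default. This sketch uses the `_mm256_mask_loadu_ps` added above; `load_or_fill_ps` is a hypothetical name.)

use core::arch::x86_64::*;

// Loads up to 8 f32 values; lanes at index >= len keep `fill`, and memory past len is not read.
#[target_feature(enable = "avx512f,avx512vl,avx")]
unsafe fn load_or_fill_ps(src: *const f32, len: usize, fill: f32) -> __m256 {
    debug_assert!(len <= 8);
    let k: __mmask8 = ((1u16 << len) - 1) as __mmask8;
    _mm256_mask_loadu_ps(_mm256_set1_ps(fill), k, src)
}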
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d { + let mut dst: __m256d = src; + asm!( + "vmovupd {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { + let mut dst: __m256d; + asm!( + "vmovupd {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i { + let mut dst: __m128i = src; + asm!( + "vmovdqu32 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { + let mut dst: __m128i; + asm!( + "vmovdqu32 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i { + let mut dst: __m128i = src; + asm!( + "vmovdqu64 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { + let mut dst: __m128i; + asm!( + "vmovdqu64 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { + let mut dst: __m128 = src; + asm!( + "vmovups {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { + let mut dst: __m128; + asm!( + "vmovups {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { + let mut dst: __m128d = src; + asm!( + "vmovupd {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { + let mut dst: __m128d; + asm!( + "vmovupd {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_epi32) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i { + let mut dst: __m512i = src; + asm!( + "vmovdqa32 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_epi32) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { + let mut dst: __m512i; + asm!( + "vmovdqa32 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_epi64) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i { + let mut dst: __m512i = src; + asm!( + "vmovdqa64 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_epi64) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { + let mut dst: __m512i; + asm!( + "vmovdqa64 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_ps) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { + let mut dst: __m512 = src; + asm!( + "vmovaps {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). 
+/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_ps) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { + let mut dst: __m512; + asm!( + "vmovaps {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_pd) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d { + let mut dst: __m512d = src; + asm!( + "vmovapd {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_pd) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { + let mut dst: __m512d; + asm!( + "vmovapd {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(zmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i { + let mut dst: __m256i = src; + asm!( + "vmovdqa32 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. 
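(Editor's note, hedged sketch only: unlike the `loadu` family, these `load` variants require the pointer itself to be suitably aligned, 64 bytes for the 512-bit forms. One way a caller might guarantee that, using the `_mm512_maskz_load_ps` defined above; `Aligned64` and `load_first_n` are hypothetical.)

use core::arch::x86_64::*;

// 64-byte-aligned backing storage, so the vmovaps-based load cannot fault on alignment.
#[repr(align(64))]
struct Aligned64([f32; 16]);

#[target_feature(enable = "avx512f")]
unsafe fn load_first_n(buf: &Aligned64, n: usize) -> __m512 {
    debug_assert!(n <= 16);
    let k: __mmask16 = if n >= 16 { !0 } else { (1u16 << n) - 1 };
    // buf is 64-byte aligned by construction, as the aligned masked load requires.
    _mm512_maskz_load_ps(k, buf.0.as_ptr())
}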
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { + let mut dst: __m256i; + asm!( + "vmovdqa32 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i { + let mut dst: __m256i = src; + asm!( + "vmovdqa64 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { + let mut dst: __m256i; + asm!( + "vmovdqa64 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { + let mut dst: __m256 = src; + asm!( + "vmovaps {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { + let mut dst: __m256; + asm!( + "vmovaps {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). 
+/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d { + let mut dst: __m256d = src; + asm!( + "vmovapd {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { + let mut dst: __m256d; + asm!( + "vmovapd {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(ymm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i { + let mut dst: __m128i = src; + asm!( + "vmovdqa32 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 32-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { + let mut dst: __m128i; + asm!( + "vmovdqa32 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i { + let mut dst: __m128i = src; + asm!( + "vmovdqa64 {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed 64-bit integers from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { + let mut dst: __m128i; + asm!( + "vmovdqa64 {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { + let mut dst: __m128 = src; + asm!( + "vmovaps {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed single-precision (32-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { + let mut dst: __m128; + asm!( + "vmovaps {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using writemask k +/// (elements are copied from src when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { + let mut dst: __m128d = src; + asm!( + "vmovapd {2}{{{1}}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + inout(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Load packed double-precision (64-bit) floating-point elements from memory into dst using zeromask k +/// (elements are zeroed out when the corresponding mask bit is not set). +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { + let mut dst: __m128d; + asm!( + "vmovapd {2}{{{1}}} {{z}}, [{0}]", + in(reg) mem_addr, + in(kreg) k, + out(xmm_reg) dst, + options(pure, readonly, nostack) + ); + dst +} + +/// Store packed 32-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi32) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { + asm!( + "vmovdqu32 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed 64-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi64) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { + asm!( + "vmovdqu64 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_ps) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { + asm!( + "vmovups [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_pd) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { + asm!( + "vmovupd [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed 32-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { + asm!( + "vmovdqu32 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed 64-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { + asm!( + "vmovdqu64 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { + asm!( + "vmovups [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { + asm!( + "vmovupd [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed 32-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { + asm!( + "vmovdqu32 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed 64-bit integers from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { + asm!( + "vmovdqu64 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { + asm!( + "vmovups [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k. +/// mem_addr does not need to be aligned on any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { + asm!( + "vmovupd [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed 32-bit integers from a into memory using writemask k. +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_epi32) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { + asm!( + "vmovdqa32 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed 64-bit integers from a into memory using writemask k. +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_epi64) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { + asm!( + "vmovdqa64 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k. +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_ps) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { + asm!( + "vmovaps [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k. +/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_pd) +#[inline] +#[target_feature(enable = "avx512f")] +pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { + asm!( + "vmovapd [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(zmm_reg) a, + options(nostack) + ); +} + +/// Store packed 32-bit integers from a into memory using writemask k. +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { + asm!( + "vmovdqa32 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed 64-bit integers from a into memory using writemask k. +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { + asm!( + "vmovdqa64 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k. +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { + asm!( + "vmovaps [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k. +/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx")] +pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { + asm!( + "vmovapd [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(ymm_reg) a, + options(nostack) + ); +} + +/// Store packed 32-bit integers from a into memory using writemask k. +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_epi32) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { + asm!( + "vmovdqa32 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed 64-bit integers from a into memory using writemask k. +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_epi64) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { + asm!( + "vmovdqa64 [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k. +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_ps) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { + asm!( + "vmovaps [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + +/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k. +/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_pd) +#[inline] +#[target_feature(enable = "avx512f,avx512vl,avx,sse")] +pub unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { + asm!( + "vmovapd [{0}]{{{1}}}, {2}", + in(reg) mem_addr, + in(kreg) mask, + in(xmm_reg) a, + options(nostack) + ); +} + /// Set packed double-precision (64-bit) floating-point elements in dst with the supplied values in reverse order. /// /// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_pd&expand=5002) @@ -44587,6 +45883,971 @@ mod tests { assert_eq_m512(r, a); } + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_loadu_epi32() { + let src = _mm512_set1_epi32(42); + let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_mask_loadu_epi32(src, m, black_box(p)); + let e = _mm512_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_loadu_epi32() { + let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_maskz_loadu_epi32(m, black_box(p)); + let e = _mm512_setr_epi32(0, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_load_epi32() { + #[repr(align(64))] + struct Align { + data: [i32; 16], // 64 bytes + } + let src = _mm512_set1_epi32(42); + let a = Align { + data: [1_i32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + }; + let p = a.data.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_mask_load_epi32(src, m, black_box(p)); + let e = _mm512_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_load_epi32() { + #[repr(align(64))] + struct Align { + data: [i32; 16], // 64 bytes + } + let a = Align { + data: [1_i32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + }; + let p = a.data.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_maskz_load_epi32(m, black_box(p)); + let e = _mm512_setr_epi32(0, 2, 0, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 14, 15, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_storeu_epi32() { + let mut r = [42_i32; 16]; + let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let m = 0b11101000_11001010; + _mm512_mask_storeu_epi32(r.as_mut_ptr(), m, a); + let e = _mm512_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16); + assert_eq_m512i(_mm512_loadu_epi32(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn 
test_mm512_mask_store_epi32() { + #[repr(align(64))] + struct Align { + data: [i32; 16], + } + let mut r = Align { data: [42; 16] }; + let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let m = 0b11101000_11001010; + _mm512_mask_store_epi32(r.data.as_mut_ptr(), m, a); + let e = _mm512_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8, 42, 42, 42, 12, 42, 14, 15, 16); + assert_eq_m512i(_mm512_load_epi32(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_loadu_epi64() { + let src = _mm512_set1_epi64(42); + let a = &[1_i64, 2, 3, 4, 5, 6, 7, 8]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm512_mask_loadu_epi64(src, m, black_box(p)); + let e = _mm512_setr_epi64(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_loadu_epi64() { + let a = &[1_i64, 2, 3, 4, 5, 6, 7, 8]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm512_maskz_loadu_epi64(m, black_box(p)); + let e = _mm512_setr_epi64(0, 2, 0, 4, 0, 0, 7, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_load_epi64() { + #[repr(align(64))] + struct Align { + data: [i64; 8], // 64 bytes + } + let src = _mm512_set1_epi64(42); + let a = Align { + data: [1_i64, 2, 3, 4, 5, 6, 7, 8], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm512_mask_load_epi64(src, m, black_box(p)); + let e = _mm512_setr_epi64(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_load_epi64() { + #[repr(align(64))] + struct Align { + data: [i64; 8], // 64 bytes + } + let a = Align { + data: [1_i64, 2, 3, 4, 5, 6, 7, 8], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm512_maskz_load_epi64(m, black_box(p)); + let e = _mm512_setr_epi64(0, 2, 0, 4, 0, 0, 7, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_storeu_epi64() { + let mut r = [42_i64; 8]; + let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let m = 0b11001010; + _mm512_mask_storeu_epi64(r.as_mut_ptr(), m, a); + let e = _mm512_setr_epi64(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m512i(_mm512_loadu_epi64(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_store_epi64() { + #[repr(align(64))] + struct Align { + data: [i64; 8], + } + let mut r = Align { data: [42; 8] }; + let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let m = 0b11001010; + let p = r.data.as_mut_ptr(); + _mm512_mask_store_epi64(p, m, a); + let e = _mm512_setr_epi64(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m512i(_mm512_load_epi64(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_loadu_ps() { + let src = _mm512_set1_ps(42.0); + let a = &[ + 1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, + 16.0, + ]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_mask_loadu_ps(src, m, black_box(p)); + let e = _mm512_setr_ps( + 42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0, 42.0, 42.0, 42.0, 12.0, 42.0, 14.0, 15.0, + 16.0, + ); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_loadu_ps() { + let a = &[ + 1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, + 16.0, + ]; + let p = a.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_maskz_loadu_ps(m, black_box(p)); + let e = _mm512_setr_ps( + 0.0, 2.0, 0.0, 4.0, 
0.0, 0.0, 7.0, 8.0, 0.0, 0.0, 0.0, 12.0, 0.0, 14.0, 15.0, 16.0, + ); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_load_ps() { + #[repr(align(64))] + struct Align { + data: [f32; 16], // 64 bytes + } + let src = _mm512_set1_ps(42.0); + let a = Align { + data: [ + 1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, + ], + }; + let p = a.data.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_mask_load_ps(src, m, black_box(p)); + let e = _mm512_setr_ps( + 42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0, 42.0, 42.0, 42.0, 12.0, 42.0, 14.0, 15.0, + 16.0, + ); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_load_ps() { + #[repr(align(64))] + struct Align { + data: [f32; 16], // 64 bytes + } + let a = Align { + data: [ + 1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, + ], + }; + let p = a.data.as_ptr(); + let m = 0b11101000_11001010; + let r = _mm512_maskz_load_ps(m, black_box(p)); + let e = _mm512_setr_ps( + 0.0, 2.0, 0.0, 4.0, 0.0, 0.0, 7.0, 8.0, 0.0, 0.0, 0.0, 12.0, 0.0, 14.0, 15.0, 16.0, + ); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_storeu_ps() { + let mut r = [42_f32; 16]; + let a = _mm512_setr_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let m = 0b11101000_11001010; + _mm512_mask_storeu_ps(r.as_mut_ptr(), m, a); + let e = _mm512_setr_ps( + 42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0, 42.0, 42.0, 42.0, 12.0, 42.0, 14.0, 15.0, + 16.0, + ); + assert_eq_m512(_mm512_loadu_ps(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_store_ps() { + #[repr(align(64))] + struct Align { + data: [f32; 16], + } + let mut r = Align { data: [42.0; 16] }; + let a = _mm512_setr_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let m = 0b11101000_11001010; + _mm512_mask_store_ps(r.data.as_mut_ptr(), m, a); + let e = _mm512_setr_ps( + 42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0, 42.0, 42.0, 42.0, 12.0, 42.0, 14.0, 15.0, + 16.0, + ); + assert_eq_m512(_mm512_load_ps(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_loadu_pd() { + let src = _mm512_set1_pd(42.0); + let a = &[1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm512_mask_loadu_pd(src, m, black_box(p)); + let e = _mm512_setr_pd(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_loadu_pd() { + let a = &[1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm512_maskz_loadu_pd(m, black_box(p)); + let e = _mm512_setr_pd(0.0, 2.0, 0.0, 4.0, 0.0, 0.0, 7.0, 8.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_load_pd() { + #[repr(align(64))] + struct Align { + data: [f64; 8], // 64 bytes + } + let src = _mm512_set1_pd(42.0); + let a = Align { + data: [1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm512_mask_load_pd(src, m, black_box(p)); + let e = _mm512_setr_pd(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_maskz_load_pd() { + #[repr(align(64))] + struct Align { + data: [f64; 
8], // 64 bytes + } + let a = Align { + data: [1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm512_maskz_load_pd(m, black_box(p)); + let e = _mm512_setr_pd(0.0, 2.0, 0.0, 4.0, 0.0, 0.0, 7.0, 8.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_storeu_pd() { + let mut r = [42_f64; 8]; + let a = _mm512_setr_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let m = 0b11001010; + _mm512_mask_storeu_pd(r.as_mut_ptr(), m, a); + let e = _mm512_setr_pd(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m512d(_mm512_loadu_pd(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f")] + unsafe fn test_mm512_mask_store_pd() { + #[repr(align(64))] + struct Align { + data: [f64; 8], + } + let mut r = Align { data: [42.0; 8] }; + let a = _mm512_setr_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let m = 0b11001010; + _mm512_mask_store_pd(r.data.as_mut_ptr(), m, a); + let e = _mm512_setr_pd(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m512d(_mm512_load_pd(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_loadu_epi32() { + let src = _mm256_set1_epi32(42); + let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm256_mask_loadu_epi32(src, m, black_box(p)); + let e = _mm256_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_loadu_epi32() { + let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm256_maskz_loadu_epi32(m, black_box(p)); + let e = _mm256_setr_epi32(0, 2, 0, 4, 0, 0, 7, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_load_epi32() { + #[repr(align(32))] + struct Align { + data: [i32; 8], // 32 bytes + } + let src = _mm256_set1_epi32(42); + let a = Align { + data: [1_i32, 2, 3, 4, 5, 6, 7, 8], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm256_mask_load_epi32(src, m, black_box(p)); + let e = _mm256_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_load_epi32() { + #[repr(align(32))] + struct Align { + data: [i32; 8], // 32 bytes + } + let a = Align { + data: [1_i32, 2, 3, 4, 5, 6, 7, 8], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm256_maskz_load_epi32(m, black_box(p)); + let e = _mm256_setr_epi32(0, 2, 0, 4, 0, 0, 7, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_storeu_epi32() { + let mut r = [42_i32; 8]; + let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let m = 0b11001010; + _mm256_mask_storeu_epi32(r.as_mut_ptr(), m, a); + let e = _mm256_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m256i(_mm256_loadu_epi32(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_store_epi32() { + #[repr(align(64))] + struct Align { + data: [i32; 8], + } + let mut r = Align { data: [42; 8] }; + let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let m = 0b11001010; + _mm256_mask_store_epi32(r.data.as_mut_ptr(), m, a); + let e = _mm256_setr_epi32(42, 2, 42, 4, 42, 42, 7, 8); + assert_eq_m256i(_mm256_load_epi32(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_loadu_epi64() { + let src = _mm256_set1_epi64x(42); + let a = 
&[1_i64, 2, 3, 4]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm256_mask_loadu_epi64(src, m, black_box(p)); + let e = _mm256_setr_epi64x(42, 2, 42, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_loadu_epi64() { + let a = &[1_i64, 2, 3, 4]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm256_maskz_loadu_epi64(m, black_box(p)); + let e = _mm256_setr_epi64x(0, 2, 0, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_load_epi64() { + #[repr(align(32))] + struct Align { + data: [i64; 4], // 32 bytes + } + let src = _mm256_set1_epi64x(42); + let a = Align { + data: [1_i64, 2, 3, 4], + }; + let p = a.data.as_ptr(); + let m = 0b1010; + let r = _mm256_mask_load_epi64(src, m, black_box(p)); + let e = _mm256_setr_epi64x(42, 2, 42, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_load_epi64() { + #[repr(align(32))] + struct Align { + data: [i64; 4], // 32 bytes + } + let a = Align { + data: [1_i64, 2, 3, 4], + }; + let p = a.data.as_ptr(); + let m = 0b1010; + let r = _mm256_maskz_load_epi64(m, black_box(p)); + let e = _mm256_setr_epi64x(0, 2, 0, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_storeu_epi64() { + let mut r = [42_i64; 4]; + let a = _mm256_setr_epi64x(1, 2, 3, 4); + let m = 0b1010; + _mm256_mask_storeu_epi64(r.as_mut_ptr(), m, a); + let e = _mm256_setr_epi64x(42, 2, 42, 4); + assert_eq_m256i(_mm256_loadu_epi64(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_store_epi64() { + #[repr(align(32))] + struct Align { + data: [i64; 4], + } + let mut r = Align { data: [42; 4] }; + let a = _mm256_setr_epi64x(1, 2, 3, 4); + let m = 0b1010; + _mm256_mask_store_epi64(r.data.as_mut_ptr(), m, a); + let e = _mm256_setr_epi64x(42, 2, 42, 4); + assert_eq_m256i(_mm256_load_epi64(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_loadu_ps() { + let src = _mm256_set1_ps(42.0); + let a = &[1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm256_mask_loadu_ps(src, m, black_box(p)); + let e = _mm256_setr_ps(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_loadu_ps() { + let a = &[1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let p = a.as_ptr(); + let m = 0b11001010; + let r = _mm256_maskz_loadu_ps(m, black_box(p)); + let e = _mm256_setr_ps(0.0, 2.0, 0.0, 4.0, 0.0, 0.0, 7.0, 8.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_load_ps() { + #[repr(align(32))] + struct Align { + data: [f32; 8], // 32 bytes + } + let src = _mm256_set1_ps(42.0); + let a = Align { + data: [1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm256_mask_load_ps(src, m, black_box(p)); + let e = _mm256_setr_ps(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_load_ps() { + #[repr(align(32))] + struct Align { + data: [f32; 8], // 32 bytes + } + let a = Align { + data: [1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + }; + let p = a.data.as_ptr(); + let m = 0b11001010; + let r = _mm256_maskz_load_ps(m, black_box(p)); + let e = _mm256_setr_ps(0.0, 
2.0, 0.0, 4.0, 0.0, 0.0, 7.0, 8.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_storeu_ps() { + let mut r = [42_f32; 8]; + let a = _mm256_setr_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let m = 0b11001010; + _mm256_mask_storeu_ps(r.as_mut_ptr(), m, a); + let e = _mm256_setr_ps(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m256(_mm256_loadu_ps(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_store_ps() { + #[repr(align(32))] + struct Align { + data: [f32; 8], + } + let mut r = Align { data: [42.0; 8] }; + let a = _mm256_setr_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let m = 0b11001010; + _mm256_mask_store_ps(r.data.as_mut_ptr(), m, a); + let e = _mm256_setr_ps(42.0, 2.0, 42.0, 4.0, 42.0, 42.0, 7.0, 8.0); + assert_eq_m256(_mm256_load_ps(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_loadu_pd() { + let src = _mm256_set1_pd(42.0); + let a = &[1.0_f64, 2.0, 3.0, 4.0]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm256_mask_loadu_pd(src, m, black_box(p)); + let e = _mm256_setr_pd(42.0, 2.0, 42.0, 4.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_loadu_pd() { + let a = &[1.0_f64, 2.0, 3.0, 4.0]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm256_maskz_loadu_pd(m, black_box(p)); + let e = _mm256_setr_pd(0.0, 2.0, 0.0, 4.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_load_pd() { + #[repr(align(32))] + struct Align { + data: [f64; 4], // 32 bytes + } + let src = _mm256_set1_pd(42.0); + let a = Align { + data: [1.0_f64, 2.0, 3.0, 4.0], + }; + let p = a.data.as_ptr(); + let m = 0b1010; + let r = _mm256_mask_load_pd(src, m, black_box(p)); + let e = _mm256_setr_pd(42.0, 2.0, 42.0, 4.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_maskz_load_pd() { + #[repr(align(32))] + struct Align { + data: [f64; 4], // 32 bytes + } + let a = Align { + data: [1.0_f64, 2.0, 3.0, 4.0], + }; + let p = a.data.as_ptr(); + let m = 0b1010; + let r = _mm256_maskz_load_pd(m, black_box(p)); + let e = _mm256_setr_pd(0.0, 2.0, 0.0, 4.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_storeu_pd() { + let mut r = [42_f64; 4]; + let a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0); + let m = 0b1010; + _mm256_mask_storeu_pd(r.as_mut_ptr(), m, a); + let e = _mm256_setr_pd(42.0, 2.0, 42.0, 4.0); + assert_eq_m256d(_mm256_loadu_pd(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm256_mask_store_pd() { + #[repr(align(32))] + struct Align { + data: [f64; 4], + } + let mut r = Align { data: [42.0; 4] }; + let a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0); + let m = 0b1010; + _mm256_mask_store_pd(r.data.as_mut_ptr(), m, a); + let e = _mm256_setr_pd(42.0, 2.0, 42.0, 4.0); + assert_eq_m256d(_mm256_load_pd(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_loadu_epi32() { + let src = _mm_set1_epi32(42); + let a = &[1_i32, 2, 3, 4]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm_mask_loadu_epi32(src, m, black_box(p)); + let e = _mm_setr_epi32(42, 2, 42, 4); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_maskz_loadu_epi32() { + let a = &[1_i32, 2, 3, 4]; + let p = a.as_ptr(); + let m = 0b1010; + let r = 
_mm_maskz_loadu_epi32(m, black_box(p));
+        let e = _mm_setr_epi32(0, 2, 0, 4);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_load_epi32() {
+        #[repr(align(16))]
+        struct Align {
+            data: [i32; 4], // 16 bytes
+        }
+        let src = _mm_set1_epi32(42);
+        let a = Align {
+            data: [1_i32, 2, 3, 4],
+        };
+        let p = a.data.as_ptr();
+        let m = 0b1010;
+        let r = _mm_mask_load_epi32(src, m, black_box(p));
+        let e = _mm_setr_epi32(42, 2, 42, 4);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_maskz_load_epi32() {
+        #[repr(align(16))]
+        struct Align {
+            data: [i32; 4], // 16 bytes
+        }
+        let a = Align {
+            data: [1_i32, 2, 3, 4],
+        };
+        let p = a.data.as_ptr();
+        let m = 0b1010;
+        let r = _mm_maskz_load_epi32(m, black_box(p));
+        let e = _mm_setr_epi32(0, 2, 0, 4);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_storeu_epi32() {
+        let mut r = [42_i32; 4];
+        let a = _mm_setr_epi32(1, 2, 3, 4);
+        let m = 0b1010;
+        _mm_mask_storeu_epi32(r.as_mut_ptr(), m, a);
+        let e = _mm_setr_epi32(42, 2, 42, 4);
+        assert_eq_m128i(_mm_loadu_epi32(r.as_ptr()), e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_store_epi32() {
+        #[repr(align(16))]
+        struct Align {
+            data: [i32; 4], // 16 bytes
+        }
+        let mut r = Align { data: [42; 4] };
+        let a = _mm_setr_epi32(1, 2, 3, 4);
+        let m = 0b1010;
+        _mm_mask_store_epi32(r.data.as_mut_ptr(), m, a);
+        let e = _mm_setr_epi32(42, 2, 42, 4);
+        assert_eq_m128i(_mm_load_epi32(r.data.as_ptr()), e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_loadu_epi64() {
+        let src = _mm_set1_epi64x(42);
+        let a = &[1_i64, 2];
+        let p = a.as_ptr();
+        let m = 0b10;
+        let r = _mm_mask_loadu_epi64(src, m, black_box(p));
+        let e = _mm_setr_epi64x(42, 2);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_maskz_loadu_epi64() {
+        let a = &[1_i64, 2];
+        let p = a.as_ptr();
+        let m = 0b10;
+        let r = _mm_maskz_loadu_epi64(m, black_box(p));
+        let e = _mm_setr_epi64x(0, 2);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_load_epi64() {
+        #[repr(align(16))]
+        struct Align {
+            data: [i64; 2], // 16 bytes
+        }
+        let src = _mm_set1_epi64x(42);
+        let a = Align { data: [1_i64, 2] };
+        let p = a.data.as_ptr();
+        let m = 0b10;
+        let r = _mm_mask_load_epi64(src, m, black_box(p));
+        let e = _mm_setr_epi64x(42, 2);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_maskz_load_epi64() {
+        #[repr(align(16))]
+        struct Align {
+            data: [i64; 2], // 16 bytes
+        }
+        let a = Align { data: [1_i64, 2] };
+        let p = a.data.as_ptr();
+        let m = 0b10;
+        let r = _mm_maskz_load_epi64(m, black_box(p));
+        let e = _mm_setr_epi64x(0, 2);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_storeu_epi64() {
+        let mut r = [42_i64; 2];
+        let a = _mm_setr_epi64x(1, 2);
+        let m = 0b10;
+        _mm_mask_storeu_epi64(r.as_mut_ptr(), m, a);
+        let e = _mm_setr_epi64x(42, 2);
+        assert_eq_m128i(_mm_loadu_epi64(r.as_ptr()), e);
+    }
+
+    #[simd_test(enable = "avx512f,avx512vl")]
+    unsafe fn test_mm_mask_store_epi64() {
+        #[repr(align(16))]
+        struct Align {
+            data: [i64; 2], // 16 bytes
+        }
+        let mut r = Align { data: [42; 2] };
+        let a = _mm_setr_epi64x(1, 2);
+        let m = 0b10;
+        _mm_mask_store_epi64(r.data.as_mut_ptr(), m, a);
+        let e = _mm_setr_epi64x(42, 2);
+        
assert_eq_m128i(_mm_load_epi64(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_loadu_ps() { + let src = _mm_set1_ps(42.0); + let a = &[1.0_f32, 2.0, 3.0, 4.0]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm_mask_loadu_ps(src, m, black_box(p)); + let e = _mm_setr_ps(42.0, 2.0, 42.0, 4.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_maskz_loadu_ps() { + let a = &[1.0_f32, 2.0, 3.0, 4.0]; + let p = a.as_ptr(); + let m = 0b1010; + let r = _mm_maskz_loadu_ps(m, black_box(p)); + let e = _mm_setr_ps(0.0, 2.0, 0.0, 4.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_load_ps() { + #[repr(align(16))] + struct Align { + data: [f32; 4], // 16 bytes + } + let src = _mm_set1_ps(42.0); + let a = Align { + data: [1.0_f32, 2.0, 3.0, 4.0], + }; + let p = a.data.as_ptr(); + let m = 0b1010; + let r = _mm_mask_load_ps(src, m, black_box(p)); + let e = _mm_setr_ps(42.0, 2.0, 42.0, 4.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_maskz_load_ps() { + #[repr(align(16))] + struct Align { + data: [f32; 4], // 16 bytes + } + let a = Align { + data: [1.0_f32, 2.0, 3.0, 4.0], + }; + let p = a.data.as_ptr(); + let m = 0b1010; + let r = _mm_maskz_load_ps(m, black_box(p)); + let e = _mm_setr_ps(0.0, 2.0, 0.0, 4.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_storeu_ps() { + let mut r = [42_f32; 4]; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let m = 0b1010; + _mm_mask_storeu_ps(r.as_mut_ptr(), m, a); + let e = _mm_setr_ps(42.0, 2.0, 42.0, 4.0); + assert_eq_m128(_mm_loadu_ps(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_store_ps() { + #[repr(align(16))] + struct Align { + data: [f32; 4], // 16 bytes + } + let mut r = Align { data: [42.0; 4] }; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let m = 0b1010; + _mm_mask_store_ps(r.data.as_mut_ptr(), m, a); + let e = _mm_setr_ps(42.0, 2.0, 42.0, 4.0); + assert_eq_m128(_mm_load_ps(r.data.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_loadu_pd() { + let src = _mm_set1_pd(42.0); + let a = &[1.0_f64, 2.0]; + let p = a.as_ptr(); + let m = 0b10; + let r = _mm_mask_loadu_pd(src, m, black_box(p)); + let e = _mm_setr_pd(42.0, 2.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_maskz_loadu_pd() { + let a = &[1.0_f64, 2.0]; + let p = a.as_ptr(); + let m = 0b10; + let r = _mm_maskz_loadu_pd(m, black_box(p)); + let e = _mm_setr_pd(0.0, 2.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_load_pd() { + #[repr(align(16))] + struct Align { + data: [f64; 2], // 16 bytes + } + let src = _mm_set1_pd(42.0); + let a = Align { + data: [1.0_f64, 2.0], + }; + let p = a.data.as_ptr(); + let m = 0b10; + let r = _mm_mask_load_pd(src, m, black_box(p)); + let e = _mm_setr_pd(42.0, 2.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_maskz_load_pd() { + #[repr(align(16))] + struct Align { + data: [f64; 2], // 16 bytes + } + let a = Align { + data: [1.0_f64, 2.0], + }; + let p = a.data.as_ptr(); + let m = 0b10; + let r = _mm_maskz_load_pd(m, black_box(p)); + let e = _mm_setr_pd(0.0, 2.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_storeu_pd() { + let mut r = 
[42_f64; 2]; + let a = _mm_setr_pd(1.0, 2.0); + let m = 0b10; + _mm_mask_storeu_pd(r.as_mut_ptr(), m, a); + let e = _mm_setr_pd(42.0, 2.0); + assert_eq_m128d(_mm_loadu_pd(r.as_ptr()), e); + } + + #[simd_test(enable = "avx512f,avx512vl")] + unsafe fn test_mm_mask_store_pd() { + #[repr(align(16))] + struct Align { + data: [f64; 2], // 16 bytes + } + let mut r = Align { data: [42.0; 2] }; + let a = _mm_setr_pd(1.0, 2.0); + let m = 0b10; + _mm_mask_store_pd(r.data.as_mut_ptr(), m, a); + let e = _mm_setr_pd(42.0, 2.0); + assert_eq_m128d(_mm_load_pd(r.data.as_ptr()), e); + } + #[simd_test(enable = "avx512f")] unsafe fn test_mm512_setr_pd() { let r = _mm512_set_pd(0., 1., 2., 3., 4., 5., 6., 7.);