Use #[rustc_intrinsic_const_vector_arg] for Neon intrinsics

This allows the constant vector to be passed directly to LLVM, which fixes
a few issues where the LLVM intrinsic expects a constant vector argument.
George Wort 2023-12-07 15:58:31 +00:00 committed by Amanieu d'Antras
parent 869ddbeb66
commit 2caf80145b
4 changed files with 102 additions and 97 deletions
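
Every affected intrinsic follows the same pattern. Condensed from the vqshlu_n_s8 hunk below, it looks roughly like this (a sketch only: the NEON vector types, static_assert_uimm_bits!, and the unstable rustc_intrinsic_const_vector_arg attribute are stdarch/rustc internals, so this only builds inside core_arch on a nightly compiler that supports the attribute; the attribute's argument appears to name the index of the parameter that must be passed as a constant vector, here n):

pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
    // N must fit in 3 unsigned bits, the valid immediate range of the instruction.
    static_assert_uimm_bits!(N, 3);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
        // Argument 1 (n) must reach LLVM as a constant vector.
        #[rustc_intrinsic_const_vector_arg(1)]
        fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
    }
    // Building the shift-amount vector inside a const block makes it a
    // genuine compile-time constant instead of a runtime construction.
    vqshlu_n_s8_(a, const { int8x8_t(N as i8, N as i8, N as i8, N as i8,
                                     N as i8, N as i8, N as i8, N as i8) })
}

Previously the vector was built at runtime (e.g. int8x8_t([N as i8, ...])), which is presumably what triggered the "LLVM select error in debug builds" entries that the two skip-list files later in this commit can now drop.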


@ -21988,9 +21988,10 @@ pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
vqrshrn_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
vqrshrn_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
}
/// Signed saturating rounded shift right narrow
@ -22026,9 +22027,10 @@ pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
vqrshrn_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
vqrshrn_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
}
/// Signed saturating rounded shift right narrow
@ -22064,9 +22066,10 @@ pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
vqrshrn_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
vqrshrn_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
}
/// Signed saturating rounded shift right narrow
@ -22102,9 +22105,10 @@ pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
vqrshrn_n_u16_(a, uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]))
vqrshrn_n_u16_(a, const { uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16) })
}
/// Unsigned saturating rounded shift right narrow
@ -22140,9 +22144,10 @@ pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
vqrshrn_n_u32_(a, uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]))
vqrshrn_n_u32_(a, const { uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32) })
}
/// Unsigned saturating rounded shift right narrow
@ -22178,9 +22183,10 @@ pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
}
vqrshrn_n_u64_(a, uint64x2_t([-N as u64, -N as u64]))
vqrshrn_n_u64_(a, const { uint64x2_t(-N as u64, -N as u64) })
}
/// Unsigned saturating rounded shift right narrow
@ -22216,9 +22222,10 @@ pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
}
vqrshrun_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
vqrshrun_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
}
/// Signed saturating rounded shift right unsigned narrow
@ -22254,9 +22261,10 @@ pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
}
vqrshrun_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
vqrshrun_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
}
/// Signed saturating rounded shift right unsigned narrow
@ -22292,9 +22300,10 @@ pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqrshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
}
vqrshrun_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
vqrshrun_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
}
/// Signed saturating rounded shift right unsigned narrow
@ -22906,9 +22915,10 @@ pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
}
vqshlu_n_s8_(a, int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
vqshlu_n_s8_(a, const { int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
}
/// Signed saturating shift left unsigned
@ -22924,10 +22934,11 @@ pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
static_assert_uimm_bits!(N, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
}
vqshlu_n_s8_(a, int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
vqshlu_n_s8_(a, const { int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
}
/// Signed saturating shift left unsigned
@ -22944,9 +22955,10 @@ pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
}
vqshlu_n_s16_(a, int16x4_t([N as i16, N as i16, N as i16, N as i16]))
vqshlu_n_s16_(a, const { int16x4_t(N as i16, N as i16, N as i16, N as i16) })
}
/// Signed saturating shift left unsigned
@ -22962,10 +22974,11 @@ pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
static_assert_uimm_bits!(N, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
}
vqshlu_n_s16_(a, int16x4_t([N as i16, N as i16, N as i16, N as i16]))
vqshlu_n_s16_(a, const { int16x4_t(N as i16, N as i16, N as i16, N as i16) })
}
/// Signed saturating shift left unsigned
@ -22982,9 +22995,10 @@ pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
}
vqshlu_n_s32_(a, int32x2_t([N as i32, N as i32]))
vqshlu_n_s32_(a, const { int32x2_t(N as i32, N as i32) })
}
/// Signed saturating shift left unsigned
@ -23000,10 +23014,11 @@ pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
static_assert_uimm_bits!(N, 5);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
}
vqshlu_n_s32_(a, int32x2_t([N as i32, N as i32]))
vqshlu_n_s32_(a, const { int32x2_t(N as i32, N as i32) })
}
/// Signed saturating shift left unsigned
@ -23020,9 +23035,10 @@ pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
}
vqshlu_n_s64_(a, int64x1_t([N as i64]))
vqshlu_n_s64_(a, const { int64x1_t(N as i64) })
}
/// Signed saturating shift left unsigned
@ -23038,10 +23054,11 @@ pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
static_assert_uimm_bits!(N, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
}
vqshlu_n_s64_(a, int64x1_t([N as i64]))
vqshlu_n_s64_(a, const { int64x1_t(N as i64) })
}
/// Signed saturating shift left unsigned
@ -23058,9 +23075,10 @@ pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
}
vqshluq_n_s8_(a, int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
vqshluq_n_s8_(a, const { int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
}
/// Signed saturating shift left unsigned
@ -23076,10 +23094,11 @@ pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
static_assert_uimm_bits!(N, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
}
vqshluq_n_s8_(a, int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
vqshluq_n_s8_(a, const { int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8) })
}
/// Signed saturating shift left unsigned
@ -23096,9 +23115,10 @@ pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
}
vqshluq_n_s16_(a, int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]))
vqshluq_n_s16_(a, const { int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16) })
}
/// Signed saturating shift left unsigned
@ -23114,10 +23134,11 @@ pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
static_assert_uimm_bits!(N, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
}
vqshluq_n_s16_(a, int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]))
vqshluq_n_s16_(a, const { int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16) })
}
/// Signed saturating shift left unsigned
@ -23134,9 +23155,10 @@ pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
}
vqshluq_n_s32_(a, int32x4_t([N as i32, N as i32, N as i32, N as i32]))
vqshluq_n_s32_(a, const { int32x4_t(N as i32, N as i32, N as i32, N as i32) })
}
/// Signed saturating shift left unsigned
@ -23152,10 +23174,11 @@ pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
static_assert_uimm_bits!(N, 5);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
}
vqshluq_n_s32_(a, int32x4_t([N as i32, N as i32, N as i32, N as i32]))
vqshluq_n_s32_(a, const { int32x4_t(N as i32, N as i32, N as i32, N as i32) })
}
/// Signed saturating shift left unsigned
@ -23172,9 +23195,10 @@ pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
}
vqshluq_n_s64_(a, int64x2_t([N as i64, N as i64]))
vqshluq_n_s64_(a, const { int64x2_t(N as i64, N as i64) })
}
/// Signed saturating shift left unsigned
@ -23190,10 +23214,11 @@ pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
static_assert_uimm_bits!(N, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
}
vqshluq_n_s64_(a, int64x2_t([N as i64, N as i64]))
vqshluq_n_s64_(a, const { int64x2_t(N as i64, N as i64) })
}
/// Signed saturating shift right narrow
@ -23210,9 +23235,10 @@ pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
vqshrn_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
vqshrn_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
}
/// Signed saturating shift right narrow
@ -23248,9 +23274,10 @@ pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
vqshrn_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
vqshrn_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
}
/// Signed saturating shift right narrow
@ -23286,9 +23313,10 @@ pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
vqshrn_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
vqshrn_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
}
/// Signed saturating shift right narrow
@ -23324,9 +23352,10 @@ pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
vqshrn_n_u16_(a, uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]))
vqshrn_n_u16_(a, const { uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16) })
}
/// Unsigned saturating shift right narrow
@ -23362,9 +23391,10 @@ pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
vqshrn_n_u32_(a, uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]))
vqshrn_n_u32_(a, const { uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32) })
}
/// Unsigned saturating shift right narrow
@ -23400,9 +23430,10 @@ pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
}
vqshrn_n_u64_(a, uint64x2_t([-N as u64, -N as u64]))
vqshrn_n_u64_(a, const { uint64x2_t(-N as u64, -N as u64) })
}
/// Unsigned saturating shift right narrow
@ -23438,9 +23469,10 @@ pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
}
vqshrun_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
vqshrun_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
}
/// Signed saturating shift right unsigned narrow
@ -23476,9 +23508,10 @@ pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
}
vqshrun_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
vqshrun_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
}
/// Signed saturating shift right unsigned narrow
@ -23514,9 +23547,10 @@ pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vqshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
}
vqshrun_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
vqshrun_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
}
/// Signed saturating shift right unsigned narrow
@ -28232,9 +28266,10 @@ pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
vrshrn_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
vrshrn_n_s16_(a, const { int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16) })
}
/// Rounding shift right narrow
@ -28270,9 +28305,10 @@ pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
vrshrn_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
vrshrn_n_s32_(a, const { int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32) })
}
/// Rounding shift right narrow
@ -28308,9 +28344,10 @@ pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
#[rustc_intrinsic_const_vector_arg(1)]
fn vrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
vrshrn_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
vrshrn_n_s64_(a, const { int64x2_t(-N as i64, -N as i64) })
}
/// Rounding shift right narrow


@ -23,19 +23,4 @@ vrnd64zq_f64
vrnd32x_f64
vrnd32z_f64
vrnd64x_f64
vrnd64z_f64
# LLVM select error in debug builds
#vqshlu_n_s16
#vqshlu_n_s32
#vqshlu_n_s64
#vqshlu_n_s8
#vqshlub_n_s8
#vqshlud_n_s64
#vqshluh_n_s16
#vqshluq_n_s16
#vqshluq_n_s32
#vqshluq_n_s64
#vqshluq_n_s8
#vqshlus_n_s32
vrnd64z_f64


@ -182,32 +182,4 @@ vrndpq_f32
vrndq_f32
vrndq_f32
vrndx_f32
vrndxq_f32
# LLVM select error in debug builds
#vqrshrn_n_s16
#vqrshrn_n_s32
#vqrshrn_n_s64
#vqrshrn_n_u16
#vqrshrn_n_u32
#vqrshrn_n_u64
#vqrshrun_n_s16
#vqrshrun_n_s32
#vqrshrun_n_s64
#vqshrn_n_s16
#vqshrn_n_s32
#vqshrn_n_s64
#vqshrn_n_u16
#vqshrn_n_u32
#vqshrn_n_u64
#vqshrun_n_s16
#vqshrun_n_s32
#vqshrun_n_s64
#vrshrn_n_s16
#vrshrn_n_s32
#vrshrn_n_s64
#vrshrn_n_u16
#vrshrn_n_u32
#vrshrn_n_u64
#vshrq_n_u64
#vshr_n_u64
vrndxq_f32


@ -2049,6 +2049,17 @@ fn gen_arm(
in_t[2].to_string(),
out_t.to_string(),
];
let get_const_vector_arg_attr = |const_option: &Option<String>| -> &str {
if let Some(const_str) = const_option {
if const_str.contains("as ttn") {
return "
#[rustc_intrinsic_const_vector_arg(1)]";
}
}
""
};
let const_vector_arg_arm = get_const_vector_arg_attr(const_arm);
let const_vector_arg_aarch64 = get_const_vector_arg_attr(const_aarch64);
if let (Some(mut link_arm), Some(mut link_aarch64)) = (link_arm.clone(), link_aarch64.clone()) {
if link_arm.contains(':') {
let links: Vec<_> = link_arm.split(':').map(|v| v.to_string()).collect();
@ -2202,7 +2213,7 @@ fn gen_arm(
ext_c_arm.push_str(&format!(
r#"#[allow(improper_ctypes)]
extern "unadjusted" {{
#[cfg_attr(target_arch = "arm", link_name = "{}")]
#[cfg_attr(target_arch = "arm", link_name = "{}")]{const_vector_arg_arm}
fn {}({}){};
}}
"#,
@ -2304,7 +2315,7 @@ fn gen_arm(
ext_c_aarch64.push_str(&format!(
r#"#[allow(improper_ctypes)]
extern "unadjusted" {{
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "{}")]
#[cfg_attr(target_arch = "aarch64", link_name = "{}")]{const_vector_arg_aarch64}
fn {}({}){};
}}
"#,
@ -2392,15 +2403,15 @@ fn gen_arm(
consts[0].clone()
} else {
let const_arm = const_arm.replace("ttn", &type_to_native_type(in_t[1]));
let mut cnt = String::from(in_t[1]);
cnt.push_str("([");
let mut cnt = String::from(format!("const {{ {}", in_t[1]));
cnt.push_str("(");
for i in 0..type_len(in_t[1]) {
if i != 0 {
cnt.push_str(", ");
}
cnt.push_str(&const_arm);
}
cnt.push_str("])");
cnt.push_str(") }");
cnt
};
match para_num {
@ -2474,8 +2485,8 @@ fn gen_arm(
}
cnt.push_str(&const_aarch64);
}
cnt.push_str("])");
format!("{current_fn}(a, {cnt})")
cnt.push_str(")");
format!("{current_fn}(a, const {{ {cnt} }})")
} else {
match para_num {
1 => format!("{current_fn}(a, {const_aarch64})"),