From 4baf95fddd7b846fd4f8a5f4e56ed0e68cf36dbc Mon Sep 17 00:00:00 2001
From: Sparrow Li
Date: Wed, 25 Aug 2021 02:51:30 +0800
Subject: [PATCH] add vldx neon instructions (#1200)

---
 .../crates/core_arch/src/aarch64/crc.rs       |    2 +-
 .../core_arch/src/aarch64/neon/generated.rs   |  460 ++--
 .../crates/core_arch/src/aarch64/neon/mod.rs  |   52 +-
 .../crates/core_arch/src/aarch64/prefetch.rs  |    2 +-
 .../crates/core_arch/src/aarch64/tme.rs       |    2 +-
 library/stdarch/crates/core_arch/src/arm/dsp.rs |  2 +-
 library/stdarch/crates/core_arch/src/arm/ex.rs  | 14 +-
 library/stdarch/crates/core_arch/src/arm/mod.rs |  2 +-
 library/stdarch/crates/core_arch/src/arm/neon.rs |  2 +-
 .../crates/core_arch/src/arm/simd32.rs        |    2 +-
 .../core_arch/src/arm_shared/barrier/mod.rs   |    2 +-
 .../crates/core_arch/src/arm_shared/crc.rs    |    2 +-
 .../crates/core_arch/src/arm_shared/crypto.rs |    2 +-
 .../crates/core_arch/src/arm_shared/hints.rs  |    2 +-
 .../src/arm_shared/neon/generated.rs          | 1964 ++++++++++++++---
 .../core_arch/src/arm_shared/neon/mod.rs      |  362 ++-
 library/stdarch/crates/stdarch-gen/neon.spec  |   75 +
 .../stdarch/crates/stdarch-gen/src/main.rs    |  430 +++-
 .../stdarch/crates/stdarch-verify/src/lib.rs  |   54 +
 19 files changed, 2730 insertions(+), 703 deletions(-)

diff --git a/library/stdarch/crates/core_arch/src/aarch64/crc.rs b/library/stdarch/crates/core_arch/src/aarch64/crc.rs
index c19d61ca73c6..6e8128534bdd 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/crc.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/crc.rs
@@ -1,4 +1,4 @@
-extern "C" {
+extern "unadjusted" {
     #[link_name = "llvm.aarch64.crc32x"]
     fn crc32x_(crc: u32, data: u64) -> u32;
 
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
index 28ea2592a043..97e794f2a1cb 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
@@ -15,7 +15,7 @@ use stdarch_test::assert_instr;
 #[cfg_attr(test, assert_instr(fabd))]
 pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v1f64")]
         fn vabd_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
@@ -28,7 +28,7 @@ pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(fabd))]
 pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v2f64")]
         fn vabdq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -1084,7 +1084,7 @@ pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(facgt))]
 pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64")]
         fn vcagt_f64_(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
     }
@@ -1097,7 +1097,7 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(facgt))]
 pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64")]
         fn vcagtq_f64_(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
     }
@@ -1110,7 +1110,7 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(facge))]
 pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v1i64.v1f64")]
         fn vcage_f64_(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
     }
@@ -1123,7 +1123,7 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(facge))]
 pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v2i64.v2f64")]
         fn vcageq_f64_(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
     }
@@ -2103,7 +2103,7 @@ pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(fcvtxn))]
 pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64")]
         fn vcvtx_f32_f64_(a: float64x2_t) -> float32x2_t;
     }
@@ -2126,7 +2126,7 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t
 pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64")]
         fn vcvt_n_f64_s64_(a: int64x1_t, n: i32) -> float64x1_t;
     }
@@ -2141,7 +2141,7 @@ pub unsafe fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t {
 pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64")]
         fn vcvtq_n_f64_s64_(a: int64x2_t, n: i32) -> float64x2_t;
     }
@@ -2156,7 +2156,7 @@ pub unsafe fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t {
 pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
     static_assert!(N : i32 where N >= 1 && N <= 32);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32")]
         fn vcvts_n_f32_s32_(a: i32, n: i32) -> f32;
     }
@@ -2171,7 +2171,7 @@ pub unsafe fn vcvts_n_f32_s32(a: i32) -> f32 {
 pub unsafe fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64")]
         fn vcvtd_n_f64_s64_(a: i64, n: i32) -> f64;
     }
@@ -2186,7 +2186,7 @@ pub unsafe fn vcvtd_n_f64_s64(a: i64) -> f64 {
 pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64")]
         fn vcvt_n_f64_u64_(a: uint64x1_t, n: i32) -> float64x1_t;
     }
@@ -2201,7 +2201,7 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t {
 pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64")]
         fn vcvtq_n_f64_u64_(a: uint64x2_t, n: i32) -> float64x2_t;
     }
@@ -2216,7 +2216,7 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t {
 pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
     static_assert!(N : i32 where N >= 1 && N <= 32);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32")]
         fn vcvts_n_f32_u32_(a: u32, n: i32) -> f32;
     }
@@ -2231,7 +2231,7 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 {
 pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64")]
         fn vcvtd_n_f64_u64_(a: u64, n: i32) -> f64;
     }
@@ -2246,7 +2246,7 @@ pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 {
 pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64")]
         fn vcvt_n_s64_f64_(a: float64x1_t, n: i32) -> int64x1_t;
     }
@@ -2261,7 +2261,7 @@ pub unsafe fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t {
 pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64")]
         fn vcvtq_n_s64_f64_(a: float64x2_t, n: i32) -> int64x2_t;
     }
@@ -2276,7 +2276,7 @@ pub unsafe fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t {
 pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
     static_assert!(N : i32 where N >= 1 && N <= 32);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32")]
         fn vcvts_n_s32_f32_(a: f32, n: i32) -> i32;
     }
@@ -2291,7 +2291,7 @@ pub unsafe fn vcvts_n_s32_f32(a: f32) -> i32 {
 pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64")]
         fn vcvtd_n_s64_f64_(a: f64, n: i32) -> i64;
     }
@@ -2306,7 +2306,7 @@ pub unsafe fn vcvtd_n_s64_f64(a: f64) -> i64 {
 pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64")]
         fn vcvt_n_u64_f64_(a: float64x1_t, n: i32) -> uint64x1_t;
     }
@@ -2321,7 +2321,7 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t {
 pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64")]
         fn vcvtq_n_u64_f64_(a: float64x2_t, n: i32) -> uint64x2_t;
     }
@@ -2336,7 +2336,7 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t {
 pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
     static_assert!(N : i32 where N >= 1 && N <= 32);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32")]
         fn vcvts_n_u32_f32_(a: f32, n: i32) -> u32;
     }
@@ -2351,7 +2351,7 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 {
 pub unsafe fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
     static_assert!(N : i32 where N >= 1 && N <= 64);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64")]
         fn vcvtd_n_u64_f64_(a: f64, n: i32) -> u64;
     }
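Outside the patch itself, a minimal usage sketch of the `_n_` fixed-point conversions touched above: the const generic `N` is the fractional-bit count that each `static_assert!` bounds. It assumes an aarch64 toolchain exposing these intrinsics under `core::arch::aarch64`:

    #[cfg(target_arch = "aarch64")]
    fn fixed_point_demo() {
        use core::arch::aarch64::vcvts_n_f32_s32;
        // N = 16 fractional bits, so 0x8000 (32768) converts to 32768 / 2^16 = 0.5.
        let x = unsafe { vcvts_n_f32_s32::<16>(0x8000) };
        assert_eq!(x, 0.5);
    }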
@@ -2428,7 +2428,7 @@ pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 {
 #[cfg_attr(test, assert_instr(fcvtzs))]
 pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v1i64.v1f64")]
         fn vcvt_s64_f64_(a: float64x1_t) -> int64x1_t;
     }
@@ -2441,7 +2441,7 @@ pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(fcvtzs))]
 pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v2i64.v2f64")]
         fn vcvtq_s64_f64_(a: float64x2_t) -> int64x2_t;
     }
@@ -2454,7 +2454,7 @@ pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(fcvtzu))]
 pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v1i64.v1f64")]
         fn vcvt_u64_f64_(a: float64x1_t) -> uint64x1_t;
     }
@@ -2467,7 +2467,7 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(fcvtzu))]
 pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v2i64.v2f64")]
         fn vcvtq_u64_f64_(a: float64x2_t) -> uint64x2_t;
     }
@@ -2480,7 +2480,7 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(fcvtas))]
 pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32")]
         fn vcvta_s32_f32_(a: float32x2_t) -> int32x2_t;
     }
@@ -2493,7 +2493,7 @@ pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
 #[cfg_attr(test, assert_instr(fcvtas))]
 pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32")]
         fn vcvtaq_s32_f32_(a: float32x4_t) -> int32x4_t;
     }
@@ -2506,7 +2506,7 @@ pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
 #[cfg_attr(test, assert_instr(fcvtas))]
 pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64")]
         fn vcvta_s64_f64_(a: float64x1_t) -> int64x1_t;
     }
@@ -2519,7 +2519,7 @@ pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(fcvtas))]
 pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64")]
         fn vcvtaq_s64_f64_(a: float64x2_t) -> int64x2_t;
     }
@@ -2532,7 +2532,7 @@ pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(fcvtas))]
 pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.i32.f32")]
         fn vcvtas_s32_f32_(a: f32) -> i32;
     }
@@ -2545,7 +2545,7 @@ pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 {
 #[cfg_attr(test, assert_instr(fcvtas))]
 pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtas.i64.f64")]
         fn vcvtad_s64_f64_(a: f64) -> i64;
     }
@@ -2558,7 +2558,7 @@ pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 {
 #[cfg_attr(test, assert_instr(fcvtau))]
 pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.i32.f32")]
         fn vcvtas_u32_f32_(a: f32) -> u32;
     }
@@ -2571,7 +2571,7 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 {
 #[cfg_attr(test, assert_instr(fcvtau))]
 pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.i64.f64")]
         fn vcvtad_u64_f64_(a: f64) -> u64;
     }
@@ -2584,7 +2584,7 @@ pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 {
 #[cfg_attr(test, assert_instr(fcvtns))]
 pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32")]
         fn vcvtn_s32_f32_(a: float32x2_t) -> int32x2_t;
     }
@@ -2597,7 +2597,7 @@ pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
 #[cfg_attr(test, assert_instr(fcvtns))]
 pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32")]
         fn vcvtnq_s32_f32_(a: float32x4_t) -> int32x4_t;
     }
@@ -2610,7 +2610,7 @@ pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
 #[cfg_attr(test, assert_instr(fcvtns))]
 pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64")]
         fn vcvtn_s64_f64_(a: float64x1_t) -> int64x1_t;
     }
@@ -2623,7 +2623,7 @@ pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(fcvtns))]
 pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64")]
         fn vcvtnq_s64_f64_(a: float64x2_t) -> int64x2_t;
     }
@@ -2636,7 +2636,7 @@ pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(fcvtns))]
 pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.i32.f32")]
         fn vcvtns_s32_f32_(a: f32) -> i32;
     }
@@ -2649,7 +2649,7 @@ pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 {
 #[cfg_attr(test, assert_instr(fcvtns))]
 pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtns.i64.f64")]
         fn vcvtnd_s64_f64_(a: f64) -> i64;
     }
@@ -2662,7 +2662,7 @@ pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 {
 #[cfg_attr(test, assert_instr(fcvtms))]
 pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32")]
         fn vcvtm_s32_f32_(a: float32x2_t) -> int32x2_t;
     }
@@ -2675,7 +2675,7 @@ pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
 #[cfg_attr(test, assert_instr(fcvtms))]
 pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32")]
         fn vcvtmq_s32_f32_(a: float32x4_t) -> int32x4_t;
     }
@@ -2688,7 +2688,7 @@ pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
 #[cfg_attr(test, assert_instr(fcvtms))]
 pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64")]
         fn vcvtm_s64_f64_(a: float64x1_t) -> int64x1_t;
     }
@@ -2701,7 +2701,7 @@ pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(fcvtms))]
 pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64")]
         fn vcvtmq_s64_f64_(a: float64x2_t) -> int64x2_t;
     }
@@ -2714,7 +2714,7 @@ pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(fcvtms))]
 pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.i32.f32")]
         fn vcvtms_s32_f32_(a: f32) -> i32;
     }
@@ -2727,7 +2727,7 @@ pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 {
 #[cfg_attr(test, assert_instr(fcvtms))]
 pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtms.i64.f64")]
         fn vcvtmd_s64_f64_(a: f64) -> i64;
     }
@@ -2740,7 +2740,7 @@ pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 {
 #[cfg_attr(test, assert_instr(fcvtps))]
 pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32")]
         fn vcvtp_s32_f32_(a: float32x2_t) -> int32x2_t;
     }
@@ -2753,7 +2753,7 @@ pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
 #[cfg_attr(test, assert_instr(fcvtps))]
 pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32")]
         fn vcvtpq_s32_f32_(a: float32x4_t) -> int32x4_t;
     }
@@ -2766,7 +2766,7 @@ pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
 #[cfg_attr(test, assert_instr(fcvtps))]
 pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64")]
         fn vcvtp_s64_f64_(a: float64x1_t) -> int64x1_t;
     }
@@ -2779,7 +2779,7 @@ pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(fcvtps))]
 pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64")]
         fn vcvtpq_s64_f64_(a: float64x2_t) -> int64x2_t;
     }
@@ -2792,7 +2792,7 @@ pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(fcvtps))]
 pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.i32.f32")]
         fn vcvtps_s32_f32_(a: f32) -> i32;
     }
@@ -2805,7 +2805,7 @@ pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 {
 #[cfg_attr(test, assert_instr(fcvtps))]
 pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtps.i64.f64")]
         fn vcvtpd_s64_f64_(a: f64) -> i64;
     }
@@ -2818,7 +2818,7 @@ pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 {
 #[cfg_attr(test, assert_instr(fcvtau))]
 pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32")]
         fn vcvta_u32_f32_(a: float32x2_t) -> uint32x2_t;
     }
@@ -2831,7 +2831,7 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
 #[cfg_attr(test, assert_instr(fcvtau))]
 pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32")]
         fn vcvtaq_u32_f32_(a: float32x4_t) -> uint32x4_t;
     }
@@ -2844,7 +2844,7 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
 #[cfg_attr(test, assert_instr(fcvtau))]
 pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64")]
         fn vcvta_u64_f64_(a: float64x1_t) -> uint64x1_t;
     }
@@ -2857,7 +2857,7 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(fcvtau))]
 pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64")]
         fn vcvtaq_u64_f64_(a: float64x2_t) -> uint64x2_t;
     }
@@ -2870,7 +2870,7 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(fcvtnu))]
 pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32")]
         fn vcvtn_u32_f32_(a: float32x2_t) -> uint32x2_t;
     }
@@ -2883,7 +2883,7 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
 #[cfg_attr(test, assert_instr(fcvtnu))]
 pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32")]
         fn vcvtnq_u32_f32_(a: float32x4_t) -> uint32x4_t;
     }
@@ -2896,7 +2896,7 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
 #[cfg_attr(test, assert_instr(fcvtnu))]
 pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64")]
         fn vcvtn_u64_f64_(a: float64x1_t) -> uint64x1_t;
     }
@@ -2909,7 +2909,7 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(fcvtnu))]
 pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64")]
         fn vcvtnq_u64_f64_(a: float64x2_t) -> uint64x2_t;
     }
@@ -2922,7 +2922,7 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(fcvtnu))]
 pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.i32.f32")]
         fn vcvtns_u32_f32_(a: f32) -> u32;
     }
@@ -2935,7 +2935,7 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 {
 #[cfg_attr(test, assert_instr(fcvtnu))]
 pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtnu.i64.f64")]
         fn vcvtnd_u64_f64_(a: f64) -> u64;
     }
@@ -2948,7 +2948,7 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 {
 #[cfg_attr(test, assert_instr(fcvtmu))]
 pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32")]
         fn vcvtm_u32_f32_(a: float32x2_t) -> uint32x2_t;
     }
@@ -2961,7 +2961,7 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
 #[cfg_attr(test, assert_instr(fcvtmu))]
 pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32")]
         fn vcvtmq_u32_f32_(a: float32x4_t) -> uint32x4_t;
     }
@@ -2974,7 +2974,7 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
 #[cfg_attr(test, assert_instr(fcvtmu))]
 pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64")]
         fn vcvtm_u64_f64_(a: float64x1_t) -> uint64x1_t;
     }
@@ -2987,7 +2987,7 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(fcvtmu))]
 pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64")]
         fn vcvtmq_u64_f64_(a: float64x2_t) -> uint64x2_t;
     }
@@ -3000,7 +3000,7 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(fcvtmu))]
 pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.i32.f32")]
         fn vcvtms_u32_f32_(a: f32) -> u32;
     }
@@ -3013,7 +3013,7 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 {
 #[cfg_attr(test, assert_instr(fcvtmu))]
 pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtmu.i64.f64")]
         fn vcvtmd_u64_f64_(a: f64) -> u64;
     }
@@ -3026,7 +3026,7 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 {
 #[cfg_attr(test, assert_instr(fcvtpu))]
 pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32")]
         fn vcvtp_u32_f32_(a: float32x2_t) -> uint32x2_t;
     }
@@ -3039,7 +3039,7 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
 #[cfg_attr(test, assert_instr(fcvtpu))]
 pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32")]
         fn vcvtpq_u32_f32_(a: float32x4_t) -> uint32x4_t;
     }
@@ -3052,7 +3052,7 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
 #[cfg_attr(test, assert_instr(fcvtpu))]
 pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64")]
         fn vcvtp_u64_f64_(a: float64x1_t) -> uint64x1_t;
     }
@@ -3065,7 +3065,7 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
 #[cfg_attr(test, assert_instr(fcvtpu))]
 pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64")]
         fn vcvtpq_u64_f64_(a: float64x2_t) -> uint64x2_t;
     }
@@ -3078,7 +3078,7 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(fcvtpu))]
 pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.i32.f32")]
         fn vcvtps_u32_f32_(a: f32) -> u32;
     }
@@ -3091,7 +3091,7 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 {
 #[cfg_attr(test, assert_instr(fcvtpu))]
 pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fcvtpu.i64.f64")]
         fn vcvtpd_u64_f64_(a: f64) -> u64;
     }
@@ -3914,7 +3914,7 @@ pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(sqneg))]
 pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v1i64")]
         fn vqneg_s64_(a: int64x1_t) -> int64x1_t;
     }
@@ -3927,7 +3927,7 @@ pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(sqneg))]
 pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v2i64")]
         fn vqnegq_s64_(a: int64x2_t) -> int64x2_t;
     }
@@ -3980,7 +3980,7 @@ pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 {
 #[cfg_attr(test, assert_instr(uqsub))]
 pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.i32")]
         fn vqsubs_u32_(a: u32, b: u32) -> u32;
     }
@@ -3993,7 +3993,7 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 {
 #[cfg_attr(test, assert_instr(uqsub))]
 pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.i64")]
         fn vqsubd_u64_(a: u64, b: u64) -> u64;
     }
@@ -4006,7 +4006,7 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 {
 #[cfg_attr(test, assert_instr(sqsub))]
 pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.i32")]
         fn vqsubs_s32_(a: i32, b: i32) -> i32;
     }
@@ -4019,7 +4019,7 @@ pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 {
 #[cfg_attr(test, assert_instr(sqsub))]
 pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.i64")]
         fn vqsubd_s64_(a: i64, b: i64) -> i64;
     }
@@ -4032,7 +4032,7 @@ pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 {
 #[cfg_attr(test, assert_instr(rbit))]
 pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rbit.v8i8")]
         fn vrbit_s8_(a: int8x8_t) -> int8x8_t;
     }
@@ -4045,7 +4045,7 @@ pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(test, assert_instr(rbit))]
 pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rbit.v16i8")]
         fn vrbitq_s8_(a: int8x16_t) -> int8x16_t;
     }
@@ -4090,7 +4090,7 @@ pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
 #[cfg_attr(test, assert_instr(frintx))]
 pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v2f32")]
         fn vrndx_f32_(a: float32x2_t) -> float32x2_t;
     }
@@ -4103,7 +4103,7 @@ pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(frintx))]
 pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v4f32")]
         fn vrndxq_f32_(a: float32x4_t) -> float32x4_t;
     }
@@ -4116,7 +4116,7 @@ pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(frintx))]
 pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v1f64")]
         fn vrndx_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4129,7 +4129,7 @@ pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frintx))]
 pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.rint.v2f64")]
         fn vrndxq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -4142,7 +4142,7 @@ pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frinta))]
 pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v2f32")]
         fn vrnda_f32_(a: float32x2_t) -> float32x2_t;
     }
@@ -4155,7 +4155,7 @@ pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(frinta))]
 pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v4f32")]
         fn vrndaq_f32_(a: float32x4_t) -> float32x4_t;
     }
@@ -4168,7 +4168,7 @@ pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(frinta))]
 pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v1f64")]
         fn vrnda_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4181,7 +4181,7 @@ pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frinta))]
 pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.round.v2f64")]
         fn vrndaq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -4194,7 +4194,7 @@ pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frintn))]
 pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v1f64")]
         fn vrndn_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4207,7 +4207,7 @@ pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frintn))]
 pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v2f64")]
         fn vrndnq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -4220,7 +4220,7 @@ pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frintm))]
 pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v2f32")]
         fn vrndm_f32_(a: float32x2_t) -> float32x2_t;
     }
@@ -4233,7 +4233,7 @@ pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(frintm))]
 pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v4f32")]
         fn vrndmq_f32_(a: float32x4_t) -> float32x4_t;
     }
@@ -4246,7 +4246,7 @@ pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(frintm))]
 pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v1f64")]
         fn vrndm_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4259,7 +4259,7 @@ pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frintm))]
 pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.floor.v2f64")]
         fn vrndmq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -4272,7 +4272,7 @@ pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frintp))]
 pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v2f32")]
         fn vrndp_f32_(a: float32x2_t) -> float32x2_t;
     }
@@ -4285,7 +4285,7 @@ pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(frintp))]
 pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v4f32")]
         fn vrndpq_f32_(a: float32x4_t) -> float32x4_t;
     }
@@ -4298,7 +4298,7 @@ pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(frintp))]
 pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v1f64")]
         fn vrndp_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4311,7 +4311,7 @@ pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frintp))]
 pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.ceil.v2f64")]
         fn vrndpq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -4324,7 +4324,7 @@ pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frintz))]
 pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v2f32")]
         fn vrnd_f32_(a: float32x2_t) -> float32x2_t;
     }
@@ -4337,7 +4337,7 @@ pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(frintz))]
 pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v4f32")]
         fn vrndq_f32_(a: float32x4_t) -> float32x4_t;
     }
@@ -4350,7 +4350,7 @@ pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(frintz))]
 pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v1f64")]
         fn vrnd_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4363,7 +4363,7 @@ pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frintz))]
 pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.trunc.v2f64")]
         fn vrndq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -4376,7 +4376,7 @@ pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frinti))]
 pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v2f32")]
         fn vrndi_f32_(a: float32x2_t) -> float32x2_t;
     }
@@ -4389,7 +4389,7 @@ pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(frinti))]
 pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v4f32")]
         fn vrndiq_f32_(a: float32x4_t) -> float32x4_t;
     }
@@ -4402,7 +4402,7 @@ pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(frinti))]
 pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v1f64")]
         fn vrndi_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -4415,7 +4415,7 @@ pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frinti))]
 pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.nearbyint.v2f64")]
         fn vrndiq_f64_(a: float64x2_t) -> float64x2_t;
     }
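The `vrnd*` hunks above bind LLVM's rounding intrinsics to the AArch64 `frint*` family; a sketch of how three of the modes differ on the same input (illustrative only, same `core::arch::aarch64` assumption as before):

    #[cfg(target_arch = "aarch64")]
    fn rounding_demo() {
        use core::arch::aarch64::{vdup_n_f32, vget_lane_f32, vrnda_f32, vrndm_f32, vrndp_f32};
        unsafe {
            let v = vdup_n_f32(1.5);
            assert_eq!(vget_lane_f32::<0>(vrndm_f32(v)), 1.0); // frintm: toward minus infinity
            assert_eq!(vget_lane_f32::<0>(vrndp_f32(v)), 2.0); // frintp: toward plus infinity
            assert_eq!(vget_lane_f32::<0>(vrnda_f32(v)), 2.0); // frinta: nearest, ties away from zero
        }
    }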
@@ -4468,7 +4468,7 @@ pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 {
 #[cfg_attr(test, assert_instr(uqadd))]
 pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.i32")]
         fn vqadds_u32_(a: u32, b: u32) -> u32;
     }
@@ -4481,7 +4481,7 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 {
 #[cfg_attr(test, assert_instr(uqadd))]
 pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.i64")]
         fn vqaddd_u64_(a: u64, b: u64) -> u64;
     }
@@ -4494,7 +4494,7 @@ pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 {
 #[cfg_attr(test, assert_instr(sqadd))]
 pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.i32")]
         fn vqadds_s32_(a: i32, b: i32) -> i32;
     }
@@ -4507,13 +4507,91 @@ pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 {
 #[cfg_attr(test, assert_instr(sqadd))]
 pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.i64")]
         fn vqaddd_s64_(a: i64, b: i64) -> i64;
     }
     vqaddd_s64_(a, b)
 }
 
+/// Load multiple single-element structures to one, two, three, or four registers
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ld1))]
+pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
+    #[allow(improper_ctypes)]
+    extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64")]
+        fn vld1_f64_x2_(a: *const f64) -> float64x1x2_t;
+    }
+    vld1_f64_x2_(a)
+}
+
+/// Load multiple single-element structures to one, two, three, or four registers
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ld1))]
+pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
+    #[allow(improper_ctypes)]
+    extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64")]
+        fn vld1q_f64_x2_(a: *const f64) -> float64x2x2_t;
+    }
+    vld1q_f64_x2_(a)
+}
+
+/// Load multiple single-element structures to one, two, three, or four registers
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ld1))]
+pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
+    #[allow(improper_ctypes)]
+    extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64")]
+        fn vld1_f64_x3_(a: *const f64) -> float64x1x3_t;
+    }
+    vld1_f64_x3_(a)
+}
+
+/// Load multiple single-element structures to one, two, three, or four registers
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ld1))]
+pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
+    #[allow(improper_ctypes)]
+    extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64")]
+        fn vld1q_f64_x3_(a: *const f64) -> float64x2x3_t;
+    }
+    vld1q_f64_x3_(a)
+}
+
+/// Load multiple single-element structures to one, two, three, or four registers
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ld1))]
+pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
+    #[allow(improper_ctypes)]
+    extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64")]
+        fn vld1_f64_x4_(a: *const f64) -> float64x1x4_t;
+    }
+    vld1_f64_x4_(a)
+}
+
+/// Load multiple single-element structures to one, two, three, or four registers
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ld1))]
+pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
+    #[allow(improper_ctypes)]
+    extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64")]
+        fn vld1q_f64_x4_(a: *const f64) -> float64x2x4_t;
+    }
+    vld1q_f64_x4_(a)
+}
+
 /// Multiply
 #[inline]
 #[target_feature(enable = "neon")]
@@ -4696,7 +4774,7 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(pmull))]
 pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmull64")]
         fn vmull_p64_(a: p64, b: p64) -> int8x16_t;
     }
@@ -4839,7 +4917,7 @@ pub unsafe fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t
 #[cfg_attr(test, assert_instr(fmulx))]
 pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v2f32")]
         fn vmulx_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
     }
@@ -4852,7 +4930,7 @@ pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(fmulx))]
 pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v4f32")]
         fn vmulxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
     }
@@ -4865,7 +4943,7 @@ pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(fmulx))]
 pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v1f64")]
         fn vmulx_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
@@ -4878,7 +4956,7 @@ pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(fmulx))]
 pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.v2f64")]
         fn vmulxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -4971,7 +5049,7 @@ pub unsafe fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t)
 #[cfg_attr(test, assert_instr(fmulx))]
 pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.f32")]
         fn vmulxs_f32_(a: f32, b: f32) -> f32;
     }
@@ -4984,7 +5062,7 @@ pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 {
 #[cfg_attr(test, assert_instr(fmulx))]
 pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmulx.f64")]
         fn vmulxd_f64_(a: f64, b: f64) -> f64;
     }
@@ -5037,7 +5115,7 @@ pub unsafe fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 {
 #[cfg_attr(test, assert_instr(fmadd))]
 pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v1f64")]
         fn vfma_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t;
     }
@@ -5050,7 +5128,7 @@ pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float6
 #[cfg_attr(test, assert_instr(fmla))]
 pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f64")]
         fn vfmaq_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
     }
@@ -5160,7 +5238,7 @@ pub unsafe fn vfmaq_laneq_f64(a: float64x2_t, b: float64x2_t, c
 #[rustc_legacy_const_generics(3)]
 pub unsafe fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")]
         fn vfmas_lane_f32_(a: f32, b: f32, c: f32) -> f32;
     }
@@ -5176,7 +5254,7 @@ pub unsafe fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) ->
 #[rustc_legacy_const_generics(3)]
 pub unsafe fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")]
         fn vfmas_laneq_f32_(a: f32, b: f32, c: f32) -> f32;
     }
@@ -5192,7 +5270,7 @@ pub unsafe fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -
 #[rustc_legacy_const_generics(3)]
 pub unsafe fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")]
         fn vfmad_lane_f64_(a: f64, b: f64, c: f64) -> f64;
     }
@@ -5208,7 +5286,7 @@ pub unsafe fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) ->
 #[rustc_legacy_const_generics(3)]
 pub unsafe fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")]
         fn vfmad_laneq_f64_(a: f64, b: f64, c: f64) -> f64;
     }
@@ -5421,7 +5499,7 @@ pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(saddlv))]
 pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i32.v4i16")]
         fn vaddlv_s16_(a: int16x4_t) -> i32;
     }
@@ -5434,7 +5512,7 @@ pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
 #[cfg_attr(test, assert_instr(saddlv))]
 pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i32.v8i16")]
         fn vaddlvq_s16_(a: int16x8_t) -> i32;
     }
@@ -5447,7 +5525,7 @@ pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
 #[cfg_attr(test, assert_instr(saddlp))]
 pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i64.v2i32")]
         fn vaddlv_s32_(a: int32x2_t) -> i64;
     }
@@ -5460,7 +5538,7 @@ pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 {
 #[cfg_attr(test, assert_instr(saddlv))]
 pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.saddlv.i64.v4i32")]
         fn vaddlvq_s32_(a: int32x4_t) -> i64;
     }
@@ -5473,7 +5551,7 @@ pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
 #[cfg_attr(test, assert_instr(uaddlv))]
 pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16")]
         fn vaddlv_u16_(a: uint16x4_t) -> u32;
     }
@@ -5486,7 +5564,7 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 {
 #[cfg_attr(test, assert_instr(uaddlv))]
 pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16")]
         fn vaddlvq_u16_(a: uint16x8_t) -> u32;
     }
@@ -5499,7 +5577,7 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 {
 #[cfg_attr(test, assert_instr(uaddlp))]
 pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32")]
         fn vaddlv_u32_(a: uint32x2_t) -> u64;
     }
@@ -5512,7 +5590,7 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 {
 #[cfg_attr(test, assert_instr(uaddlv))]
 pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32")]
         fn vaddlvq_u32_(a: uint32x4_t) -> u64;
     }
@@ -5651,7 +5729,7 @@ pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
 #[cfg_attr(test, assert_instr(fmax))]
 pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v1f64")]
         fn vmax_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
@@ -5664,7 +5742,7 @@ pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(fmax))]
 pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v2f64")]
         fn vmaxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -5677,7 +5755,7 @@ pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(fmaxnm))]
 pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v1f64")]
         fn vmaxnm_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
@@ -5690,7 +5768,7 @@ pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(fmaxnm))]
 pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v2f64")]
         fn vmaxnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -5703,7 +5781,7 @@ pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmp.v2f32")]
         fn vpmaxnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
     }
@@ -5716,7 +5794,7 @@ pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmp.v2f64")]
         fn vpmaxnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -5729,7 +5807,7 @@ pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnmp.v4f32")]
         fn vpmaxnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
     }
@@ -5742,7 +5820,7 @@ pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(fmin))]
 pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v1f64")]
         fn vmin_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
@@ -5755,7 +5833,7 @@ pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(fmin))]
 pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v2f64")]
         fn vminq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -5768,7 +5846,7 @@ pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(fminnm))]
 pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v1f64")]
         fn vminnm_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
@@ -5781,7 +5859,7 @@ pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(fminnm))]
 pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v2f64")]
         fn vminnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -5794,7 +5872,7 @@ pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(fminnmp))]
 pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmp.v2f32")]
         fn vpminnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
     }
@@ -5807,7 +5885,7 @@ pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
 #[cfg_attr(test, assert_instr(fminnmp))]
 pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmp.v2f64")]
         fn vpminnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
@@ -5820,7 +5898,7 @@ pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(fminnmp))]
 pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnmp.v4f32")]
         fn vpminnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
     }
@@ -5843,7 +5921,7 @@ pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 {
 #[cfg_attr(test, assert_instr(sqdmull))]
 pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulls.scalar")]
         fn vqdmulls_s32_(a: i32, b: i32) -> i64;
     }
@@ -6290,7 +6368,7 @@ pub unsafe fn vqmovns_u32(a: u32) -> u16 {
 #[cfg_attr(test, assert_instr(sqxtn))]
 pub unsafe fn vqmovnd_s64(a: i64) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64")]
         fn vqmovnd_s64_(a: i64) -> i32;
     }
@@ -6303,7 +6381,7 @@ pub unsafe fn vqmovnd_s64(a: i64) -> i32 {
 #[cfg_attr(test, assert_instr(uqxtn))]
 pub unsafe fn vqmovnd_u64(a: u64) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64")]
         fn vqmovnd_u64_(a: u64) -> u32;
     }
@@ -6580,7 +6658,7 @@ pub unsafe fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t)
 #[cfg_attr(test, assert_instr(sqrshl))]
 pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.i32")]
         fn vqrshls_s32_(a: i32, b: i32) -> i32;
     }
@@ -6593,7 +6671,7 @@ pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 {
 #[cfg_attr(test, assert_instr(sqrshl))]
 pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.i64")]
         fn vqrshld_s64_(a: i64, b: i64) -> i64;
     }
@@ -6626,7 +6704,7 @@ pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 {
 #[cfg_attr(test, assert_instr(uqrshl))]
 pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.i32")]
         fn vqrshls_u32_(a: u32, b: i32) -> u32;
     }
@@ -6639,7 +6717,7 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 {
 #[cfg_attr(test, assert_instr(uqrshl))]
 pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.i64")]
         fn vqrshld_u64_(a: u64, b: i64) -> u64;
     }
@@ -6861,7 +6939,7 @@ pub unsafe fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) ->
 #[cfg_attr(test, assert_instr(sqshl))]
 pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.i64")]
         fn vqshld_s64_(a: i64, b: i64) -> i64;
     }
@@ -6901,7 +6979,7 @@ pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 {
 #[cfg_attr(test, assert_instr(uqshl))]
 pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.i64")]
         fn vqshld_u64_(a: u64, b: i64) -> u64;
     }
@@ -7023,7 +7101,7 @@ pub unsafe fn vqshld_n_u64(a: u64) -> u64 {
 pub unsafe fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
     static_assert!(N : i32 where N >= 1 && N <= 32);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.i32")]
         fn vqshrnd_n_s64_(a: i64, n: i32) -> i32;
     }
@@ -7088,7 +7166,7 @@ pub unsafe fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int
 pub unsafe fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
     static_assert!(N : i32 where N >= 1 && N <= 32);
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.i32")]
         fn vqshrnd_n_u64_(a: u64, n: i32) -> u32;
     }
@@ -7243,7 +7321,7 @@ pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frsqrte))]
 pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v1f64")]
         fn vrsqrte_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -7256,7 +7334,7 @@ pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frsqrte))]
 pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v2f64")]
         fn vrsqrteq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -7269,7 +7347,7 @@ pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
 #[cfg_attr(test, assert_instr(frecpe))]
 pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v1f64")]
         fn vrecpe_f64_(a: float64x1_t) -> float64x1_t;
     }
@@ -7282,7 +7360,7 @@ pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
 #[cfg_attr(test, assert_instr(frecpe))]
 pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v2f64")]
         fn vrecpeq_f64_(a: float64x2_t) -> float64x2_t;
     }
@@ -8031,7 +8109,7 @@ pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
 #[cfg_attr(test, assert_instr(srshl))]
 pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.i64")]
         fn vrshld_s64_(a: i64, b: i64) -> i64;
     }
@@ -8044,7 +8122,7 @@ pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 {
 #[cfg_attr(test, assert_instr(urshl))]
 pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.i64")]
         fn vrshld_u64_(a: u64, b: i64) -> u64;
     }
@@ -9446,7 +9524,7 @@ pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x
 #[cfg_attr(test, assert_instr(sqabs))]
 pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v1i64")]
         fn vqabs_s64_(a: int64x1_t) -> int64x1_t;
     }
@@ -9459,7 +9537,7 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t {
 #[cfg_attr(test, assert_instr(sqabs))]
 pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "aarch64",
link_name = "llvm.aarch64.neon.sqabs.v2i64")] fn vqabsq_s64_(a: int64x2_t) -> int64x2_t; } @@ -12857,6 +12935,54 @@ mod test { assert_eq!(r, e); } + #[simd_test(enable = "neon")] + unsafe fn test_vld1_f64_x2() { + let a: [f64; 3] = [0., 1., 2.]; + let e: [f64; 2] = [1., 2.]; + let r: [f64; 2] = transmute(vld1_f64_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_f64_x2() { + let a: [f64; 5] = [0., 1., 2., 3., 4.]; + let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(3., 4.)]; + let r: [f64x2; 2] = transmute(vld1q_f64_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_f64_x3() { + let a: [f64; 4] = [0., 1., 2., 3.]; + let e: [f64; 3] = [1., 2., 3.]; + let r: [f64; 3] = transmute(vld1_f64_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_f64_x3() { + let a: [f64; 7] = [0., 1., 2., 3., 4., 5., 6.]; + let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.)]; + let r: [f64x2; 3] = transmute(vld1q_f64_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_f64_x4() { + let a: [f64; 5] = [0., 1., 2., 3., 4.]; + let e: [f64; 4] = [1., 2., 3., 4.]; + let r: [f64; 4] = transmute(vld1_f64_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_f64_x4() { + let a: [f64; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; + let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.), f64x2::new(7., 8.)]; + let r: [f64x2; 4] = transmute(vld1q_f64_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + #[simd_test(enable = "neon")] unsafe fn test_vmul_f64() { let a: f64 = 1.0; diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 95aea69ef775..686eb9446a4d 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -25,48 +25,38 @@ types! { pub struct float64x2_t(f64, f64); } -/// ARM-specific type containing two `int8x16_t` vectors. +/// ARM-specific type containing two `float64x1_t` vectors. #[derive(Copy, Clone)] -pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t); -/// ARM-specific type containing three `int8x16_t` vectors. +pub struct float64x1x2_t(pub float64x1_t, pub float64x1_t); +/// ARM-specific type containing three `float64x1_t` vectors. #[derive(Copy, Clone)] -pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t); -/// ARM-specific type containing four `int8x16_t` vectors. +pub struct float64x1x3_t(pub float64x1_t, pub float64x1_t, pub float64x1_t); +/// ARM-specific type containing four `float64x1_t` vectors. #[derive(Copy, Clone)] -pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t); - -/// ARM-specific type containing two `uint8x16_t` vectors. -#[derive(Copy, Clone)] -pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t); -/// ARM-specific type containing three `uint8x16_t` vectors. -#[derive(Copy, Clone)] -pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t); -/// ARM-specific type containing four `uint8x16_t` vectors. 
-#[derive(Copy, Clone)] -pub struct uint8x16x4_t( - pub uint8x16_t, - pub uint8x16_t, - pub uint8x16_t, - pub uint8x16_t, +pub struct float64x1x4_t( + pub float64x1_t, + pub float64x1_t, + pub float64x1_t, + pub float64x1_t, ); -/// ARM-specific type containing two `poly8x16_t` vectors. +/// ARM-specific type containing two `float64x2_t` vectors. #[derive(Copy, Clone)] -pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t); -/// ARM-specific type containing three `poly8x16_t` vectors. +pub struct float64x2x2_t(pub float64x2_t, pub float64x2_t); +/// ARM-specific type containing three `float64x2_t` vectors. #[derive(Copy, Clone)] -pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t); -/// ARM-specific type containing four `poly8x16_t` vectors. +pub struct float64x2x3_t(pub float64x2_t, pub float64x2_t, pub float64x2_t); +/// ARM-specific type containing four `float64x2_t` vectors. #[derive(Copy, Clone)] -pub struct poly8x16x4_t( - pub poly8x16_t, - pub poly8x16_t, - pub poly8x16_t, - pub poly8x16_t, +pub struct float64x2x4_t( + pub float64x2_t, + pub float64x2_t, + pub float64x2_t, + pub float64x2_t, ); #[allow(improper_ctypes)] -extern "C" { +extern "unadjusted" { // absolute value #[link_name = "llvm.aarch64.neon.abs.i64"] fn vabsd_s64_(a: i64) -> i64; diff --git a/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs b/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs index 02b3971aa478..687c3f39a0fe 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs @@ -1,7 +1,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +extern "unadjusted" { #[link_name = "llvm.prefetch"] fn prefetch(p: *const i8, rw: i32, loc: i32, ty: i32); } diff --git a/library/stdarch/crates/core_arch/src/aarch64/tme.rs b/library/stdarch/crates/core_arch/src/aarch64/tme.rs index edf87b8034f5..d1b2cf334d1c 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/tme.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/tme.rs @@ -17,7 +17,7 @@ #[cfg(test)] use stdarch_test::assert_instr; -extern "C" { +extern "unadjusted" { #[link_name = "llvm.aarch64.tstart"] fn aarch64_tstart() -> u64; #[link_name = "llvm.aarch64.tcommit"] diff --git a/library/stdarch/crates/core_arch/src/arm/dsp.rs b/library/stdarch/crates/core_arch/src/arm/dsp.rs index 7039f0351d10..6720f97a5322 100644 --- a/library/stdarch/crates/core_arch/src/arm/dsp.rs +++ b/library/stdarch/crates/core_arch/src/arm/dsp.rs @@ -32,7 +32,7 @@ types! 
{ pub struct uint16x2_t(u16, u16); } -extern "C" { +extern "unadjusted" { #[link_name = "llvm.arm.smulbb"] fn arm_smulbb(a: i32, b: i32) -> i32; diff --git a/library/stdarch/crates/core_arch/src/arm/ex.rs b/library/stdarch/crates/core_arch/src/arm/ex.rs index 2ad190a78621..75f37864251a 100644 --- a/library/stdarch/crates/core_arch/src/arm/ex.rs +++ b/library/stdarch/crates/core_arch/src/arm/ex.rs @@ -11,7 +11,7 @@ doc ))] pub unsafe fn __clrex() { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.clrex"] fn clrex(); } @@ -27,7 +27,7 @@ pub unsafe fn __clrex() { doc ))] pub unsafe fn __ldrexb(p: *const u8) -> u8 { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.ldrex.p0i8"] fn ldrex8(p: *const u8) -> u32; } @@ -43,7 +43,7 @@ pub unsafe fn __ldrexb(p: *const u8) -> u8 { doc ))] pub unsafe fn __ldrexh(p: *const u16) -> u16 { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.ldrex.p0i16"] fn ldrex16(p: *const u16) -> u32; } @@ -60,7 +60,7 @@ pub unsafe fn __ldrexh(p: *const u16) -> u16 { doc ))] pub unsafe fn __ldrex(p: *const u32) -> u32 { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.ldrex.p0i32"] fn ldrex32(p: *const u32) -> u32; } @@ -78,7 +78,7 @@ pub unsafe fn __ldrex(p: *const u32) -> u32 { doc ))] pub unsafe fn __strexb(value: u32, addr: *mut u8) -> u32 { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.strex.p0i8"] fn strex8(value: u32, addr: *mut u8) -> u32; } @@ -97,7 +97,7 @@ pub unsafe fn __strexb(value: u32, addr: *mut u8) -> u32 { doc ))] pub unsafe fn __strexh(value: u16, addr: *mut u16) -> u32 { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.strex.p0i16"] fn strex16(value: u32, addr: *mut u16) -> u32; } @@ -116,7 +116,7 @@ pub unsafe fn __strexh(value: u16, addr: *mut u16) -> u32 { doc ))] pub unsafe fn __strex(value: u32, addr: *mut u32) -> u32 { - extern "C" { + extern "unadjusted" { #[link_name = "llvm.arm.strex.p0i32"] fn strex32(value: u32, addr: *mut u32) -> u32; } diff --git a/library/stdarch/crates/core_arch/src/arm/mod.rs b/library/stdarch/crates/core_arch/src/arm/mod.rs index d6b12b829267..3c56ec7b1ea7 100644 --- a/library/stdarch/crates/core_arch/src/arm/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm/mod.rs @@ -107,7 +107,7 @@ pub unsafe fn __dbg() { dbg(IMM4); } -extern "C" { +extern "unadjusted" { #[link_name = "llvm.arm.dbg"] fn dbg(_: i32); } diff --git a/library/stdarch/crates/core_arch/src/arm/neon.rs b/library/stdarch/crates/core_arch/src/arm/neon.rs index 473c753fd638..cf3b16f9a0ea 100644 --- a/library/stdarch/crates/core_arch/src/arm/neon.rs +++ b/library/stdarch/crates/core_arch/src/arm/neon.rs @@ -12,7 +12,7 @@ pub(crate) type p8 = u8; pub(crate) type p16 = u16; #[allow(improper_ctypes)] -extern "C" { +extern "unadjusted" { #[link_name = "llvm.arm.neon.vbsl.v8i8"] fn vbsl_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; #[link_name = "llvm.arm.neon.vbsl.v16i8"] diff --git a/library/stdarch/crates/core_arch/src/arm/simd32.rs b/library/stdarch/crates/core_arch/src/arm/simd32.rs index 5cae2fc2aa09..2d867acc83e0 100644 --- a/library/stdarch/crates/core_arch/src/arm/simd32.rs +++ b/library/stdarch/crates/core_arch/src/arm/simd32.rs @@ -80,7 +80,7 @@ macro_rules! 
dsp_call { }; } -extern "C" { +extern "unadjusted" { #[link_name = "llvm.arm.qadd8"] fn arm_qadd8(a: i32, b: i32) -> i32; diff --git a/library/stdarch/crates/core_arch/src/arm_shared/barrier/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/barrier/mod.rs index b3cbf44d2773..6ccced00e3ce 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/barrier/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/barrier/mod.rs @@ -122,7 +122,7 @@ where arg.__isb() } -extern "C" { +extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.dmb")] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.dmb")] fn dmb(_: i32); diff --git a/library/stdarch/crates/core_arch/src/arm_shared/crc.rs b/library/stdarch/crates/core_arch/src/arm_shared/crc.rs index b1cfbb381b6f..e0d0fbe35673 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/crc.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/crc.rs @@ -1,4 +1,4 @@ -extern "C" { +extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crc32b")] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32b")] fn crc32b_(crc: u32, data: u32) -> u32; diff --git a/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs b/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs index 4cdebb1da4c8..56b99088bf86 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/crypto.rs @@ -1,7 +1,7 @@ use crate::core_arch::arm_shared::{uint32x4_t, uint8x16_t}; #[allow(improper_ctypes)] -extern "C" { +extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aese")] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] fn vaeseq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; diff --git a/library/stdarch/crates/core_arch/src/arm_shared/hints.rs b/library/stdarch/crates/core_arch/src/arm_shared/hints.rs index 3145cde8d563..1d6551e5e7b1 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/hints.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/hints.rs @@ -80,7 +80,7 @@ pub unsafe fn __nop() { asm!("nop", options(nomem, nostack, preserves_flags)); } -extern "C" { +extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.hint")] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.hint")] fn hint(_: i32); diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 7dc5b53e1015..bdf8937d9d9d 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -497,7 +497,7 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v8i8")] fn vabd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -513,7 +513,7 @@ vabd_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")] 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v16i8")] fn vabdq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -529,7 +529,7 @@ vabdq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v4i16")] fn vabd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -545,7 +545,7 @@ vabd_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v8i16")] fn vabdq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -561,7 +561,7 @@ vabdq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v2i32")] fn vabd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -577,7 +577,7 @@ vabd_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v4i32")] fn vabdq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -593,7 +593,7 @@ vabdq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v8i8")] fn vabd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -609,7 +609,7 @@ vabd_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v16i8")] fn vabdq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -625,7 +625,7 @@ vabdq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v4i16")] fn vabd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -641,7 +641,7 @@ vabd_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vabdu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v8i16")] fn vabdq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -657,7 +657,7 @@ vabdq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v2i32")] fn vabd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -673,7 +673,7 @@ vabd_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v4i32")] fn vabdq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -689,7 +689,7 @@ vabdq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v2f32")] fn vabd_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t; @@ -705,7 +705,7 @@ vabd_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v4f32")] fn vabdq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; @@ -1692,7 +1692,7 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v8i8")] fn vcls_s8_(a: int8x8_t) -> int8x8_t; @@ -1708,7 +1708,7 @@ vcls_s8_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v16i8")] fn vclsq_s8_(a: int8x16_t) -> int8x16_t; @@ -1724,7 +1724,7 @@ vclsq_s8_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v4i16")] fn vcls_s16_(a: int16x4_t) -> int16x4_t; @@ -1740,7 +1740,7 @@ vcls_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vcls.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v8i16")] fn vclsq_s16_(a: int16x8_t) -> int16x8_t; @@ -1756,7 +1756,7 @@ vclsq_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v2i32")] fn vcls_s32_(a: int32x2_t) -> int32x2_t; @@ -1772,7 +1772,7 @@ vcls_s32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v4i32")] fn vclsq_s32_(a: int32x4_t) -> int32x4_t; @@ -1908,7 +1908,7 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32")] fn vcagt_f32_(a: float32x2_t, b: float32x2_t) -> uint32x2_t; @@ -1924,7 +1924,7 @@ vcagt_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32")] fn vcagtq_f32_(a: float32x4_t, b: float32x4_t) -> uint32x4_t; @@ -1940,7 +1940,7 @@ vcagtq_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v2i32.v2f32")] fn vcage_f32_(a: float32x2_t, b: float32x2_t) -> uint32x2_t; @@ -1956,7 +1956,7 @@ vcage_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v4i32.v4f32")] fn vcageq_f32_(a: float32x4_t, b: float32x4_t) -> uint32x4_t; @@ -2153,7 +2153,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32")] fn vcvt_n_f32_s32_(a: int32x2_t, n: i32) -> float32x2_t; } @@ -2169,7 +2169,7 @@ vcvt_n_f32_s32_(a, N) pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32")] fn vcvt_n_f32_s32_(a: int32x2_t, n: i32) -> float32x2_t; } @@ -2185,7 +2185,7 @@ vcvt_n_f32_s32_(a, N) pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32")] fn vcvtq_n_f32_s32_(a: int32x4_t, n: i32) -> float32x4_t; } @@ -2201,7 +2201,7 @@ vcvtq_n_f32_s32_(a, N) pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32")] fn vcvtq_n_f32_s32_(a: int32x4_t, n: i32) -> float32x4_t; } @@ -2217,7 +2217,7 @@ vcvtq_n_f32_s32_(a, N) pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32")] fn vcvt_n_f32_u32_(a: uint32x2_t, n: i32) -> float32x2_t; } @@ -2233,7 +2233,7 @@ vcvt_n_f32_u32_(a, N) pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32")] fn vcvt_n_f32_u32_(a: uint32x2_t, n: i32) -> float32x2_t; } @@ -2249,7 +2249,7 @@ vcvt_n_f32_u32_(a, N) pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32")] fn vcvtq_n_f32_u32_(a: uint32x4_t, n: i32) -> float32x4_t; } @@ -2265,7 +2265,7 @@ vcvtq_n_f32_u32_(a, N) pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32")] fn vcvtq_n_f32_u32_(a: uint32x4_t, n: i32) -> float32x4_t; } @@ -2281,7 +2281,7 @@ vcvtq_n_f32_u32_(a, N) pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32")] fn vcvt_n_s32_f32_(a: float32x2_t, n: i32) -> int32x2_t; } @@ -2297,7 +2297,7 @@ vcvt_n_s32_f32_(a, N) pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32")] fn vcvt_n_s32_f32_(a: float32x2_t, n: i32) -> int32x2_t; } @@ -2313,7 +2313,7 @@ vcvt_n_s32_f32_(a, N) pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32")] fn vcvtq_n_s32_f32_(a: float32x4_t, n: i32) -> int32x4_t; } @@ -2329,7 +2329,7 @@ vcvtq_n_s32_f32_(a, N) pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) 
-> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32")] fn vcvtq_n_s32_f32_(a: float32x4_t, n: i32) -> int32x4_t; } @@ -2345,7 +2345,7 @@ vcvtq_n_s32_f32_(a, N) pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32")] fn vcvt_n_u32_f32_(a: float32x2_t, n: i32) -> uint32x2_t; } @@ -2361,7 +2361,7 @@ vcvt_n_u32_f32_(a, N) pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32")] fn vcvt_n_u32_f32_(a: float32x2_t, n: i32) -> uint32x2_t; } @@ -2377,7 +2377,7 @@ vcvt_n_u32_f32_(a, N) pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32")] fn vcvtq_n_u32_f32_(a: float32x4_t, n: i32) -> uint32x4_t; } @@ -2393,7 +2393,7 @@ vcvtq_n_u32_f32_(a, N) pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32")] fn vcvtq_n_u32_f32_(a: float32x4_t, n: i32) -> uint32x4_t; } @@ -2408,7 +2408,7 @@ vcvtq_n_u32_f32_(a, N) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v2i32.v2f32")] fn vcvt_s32_f32_(a: float32x2_t) -> int32x2_t; @@ -2424,7 +2424,7 @@ vcvt_s32_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v4i32.v4f32")] fn vcvtq_s32_f32_(a: float32x4_t) -> int32x4_t; @@ -2440,7 +2440,7 @@ vcvtq_s32_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v2i32.v2f32")] fn vcvt_u32_f32_(a: float32x2_t) -> uint32x2_t; @@ -2456,7 +2456,7 @@ vcvt_u32_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v4i32.v4f32")] fn vcvtq_u32_f32_(a: float32x4_t) -> uint32x4_t; @@ -4842,7 +4842,7 @@ pub unsafe fn 
vnegq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v8i8")] fn vqneg_s8_(a: int8x8_t) -> int8x8_t; @@ -4858,7 +4858,7 @@ vqneg_s8_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v16i8")] fn vqnegq_s8_(a: int8x16_t) -> int8x16_t; @@ -4874,7 +4874,7 @@ vqnegq_s8_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v4i16")] fn vqneg_s16_(a: int16x4_t) -> int16x4_t; @@ -4890,7 +4890,7 @@ vqneg_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v8i16")] fn vqnegq_s16_(a: int16x8_t) -> int16x8_t; @@ -4906,7 +4906,7 @@ vqnegq_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v2i32")] fn vqneg_s32_(a: int32x2_t) -> int32x2_t; @@ -4922,7 +4922,7 @@ vqneg_s32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v4i32")] fn vqnegq_s32_(a: int32x4_t) -> int32x4_t; @@ -4938,7 +4938,7 @@ vqnegq_s32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v8i8")] fn vqsub_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -4954,7 +4954,7 @@ vqsub_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v16i8")] fn vqsubq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -4970,7 +4970,7 @@ vqsubq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsub_u16(a: 
uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v4i16")] fn vqsub_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -4986,7 +4986,7 @@ vqsub_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v8i16")] fn vqsubq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -5002,7 +5002,7 @@ vqsubq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v2i32")] fn vqsub_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -5018,7 +5018,7 @@ vqsub_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v4i32")] fn vqsubq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -5034,7 +5034,7 @@ vqsubq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v1i64")] fn vqsub_u64_(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; @@ -5050,7 +5050,7 @@ vqsub_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v2i64")] fn vqsubq_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; @@ -5066,7 +5066,7 @@ vqsubq_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v8i8")] fn vqsub_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -5082,7 +5082,7 @@ vqsub_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v16i8")] fn vqsubq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -5098,7 +5098,7 @@ vqsubq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(sqsub))] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v4i16")] fn vqsub_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -5114,7 +5114,7 @@ vqsub_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v8i16")] fn vqsubq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -5130,7 +5130,7 @@ vqsubq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v2i32")] fn vqsub_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -5146,7 +5146,7 @@ vqsub_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v4i32")] fn vqsubq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -5162,7 +5162,7 @@ vqsubq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v1i64")] fn vqsub_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t; @@ -5178,7 +5178,7 @@ vqsub_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v2i64")] fn vqsubq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; @@ -5194,7 +5194,7 @@ vqsubq_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v8i8")] fn vhadd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -5210,7 +5210,7 @@ vhadd_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v16i8")] fn vhaddq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -5226,7 +5226,7 @@ vhaddq_u8_(a, b) 
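// NOTE: vhadd is the truncating halving add, (a + b) >> 1 evaluated
// element-wise without intermediate overflow; the vrhadd forms further on
// round instead, computing (a + b + 1) >> 1. A one-lane sketch (hypothetical
// helper name):
fn hadd_u8_lane(a: u8, b: u8) -> u8 {
    // Widen to u16 so the sum cannot wrap, then truncate the halved result.
    ((a as u16 + b as u16) >> 1) as u8
}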
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v4i16")] fn vhadd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -5242,7 +5242,7 @@ vhadd_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v8i16")] fn vhaddq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -5258,7 +5258,7 @@ vhaddq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v2i32")] fn vhadd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -5274,7 +5274,7 @@ vhadd_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v4i32")] fn vhaddq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -5290,7 +5290,7 @@ vhaddq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v8i8")] fn vhadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -5306,7 +5306,7 @@ vhadd_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v16i8")] fn vhaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -5322,7 +5322,7 @@ vhaddq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v4i16")] fn vhadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -5338,7 +5338,7 @@ vhadd_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v8i16")] fn 
vhaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -5354,7 +5354,7 @@ vhaddq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v2i32")] fn vhadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -5370,7 +5370,7 @@ vhadd_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v4i32")] fn vhaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -5386,7 +5386,7 @@ vhaddq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v8i8")] fn vrhadd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -5402,7 +5402,7 @@ vrhadd_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v16i8")] fn vrhaddq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -5418,7 +5418,7 @@ vrhaddq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v4i16")] fn vrhadd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -5434,7 +5434,7 @@ vrhadd_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v8i16")] fn vrhaddq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -5450,7 +5450,7 @@ vrhaddq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v2i32")] fn vrhadd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -5466,7 +5466,7 @@ vrhadd_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v4i32")] fn vrhaddq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -5482,7 +5482,7 @@ vrhaddq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v8i8")] fn vrhadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -5498,7 +5498,7 @@ vrhadd_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v16i8")] fn vrhaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -5514,7 +5514,7 @@ vrhaddq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v4i16")] fn vrhadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -5530,7 +5530,7 @@ vrhadd_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v8i16")] fn vrhaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -5546,7 +5546,7 @@ vrhaddq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v2i32")] fn vrhadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -5562,7 +5562,7 @@ vrhadd_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v4i32")] fn vrhaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -5578,7 +5578,7 @@ vrhaddq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v2f32")] fn vrndn_f32_(a: float32x2_t) -> float32x2_t; @@ -5594,7 +5594,7 @@ vrndn_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { 
#[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v4f32")] fn vrndnq_f32_(a: float32x4_t) -> float32x4_t; @@ -5610,7 +5610,7 @@ vrndnq_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v8i8")] fn vqadd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -5626,7 +5626,7 @@ vqadd_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v16i8")] fn vqaddq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -5642,7 +5642,7 @@ vqaddq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v4i16")] fn vqadd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -5658,7 +5658,7 @@ vqadd_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v8i16")] fn vqaddq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -5674,7 +5674,7 @@ vqaddq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v2i32")] fn vqadd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -5690,7 +5690,7 @@ vqadd_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v4i32")] fn vqaddq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -5706,7 +5706,7 @@ vqaddq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v1i64")] fn vqadd_u64_(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; @@ -5722,7 +5722,7 @@ vqadd_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] pub unsafe fn 
vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v2i64")] fn vqaddq_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; @@ -5738,7 +5738,7 @@ vqaddq_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v8i8")] fn vqadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -5754,7 +5754,7 @@ vqadd_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v16i8")] fn vqaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -5770,7 +5770,7 @@ vqaddq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v4i16")] fn vqadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -5786,7 +5786,7 @@ vqadd_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v8i16")] fn vqaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -5802,7 +5802,7 @@ vqaddq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v2i32")] fn vqadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -5818,7 +5818,7 @@ vqadd_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v4i32")] fn vqaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -5834,7 +5834,7 @@ vqaddq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v1i64")] fn vqadd_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t; @@ -5850,7 +5850,7 @@ vqadd_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(sqadd))] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v2i64")] fn vqaddq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; @@ -5858,6 +5858,846 @@ pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { vqaddq_s64_(a, b) } +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8")] + fn vld1_s8_x2_(a: *const i8) -> int8x8x2_t; + } +vld1_s8_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16")] + fn vld1_s16_x2_(a: *const i16) -> int16x4x2_t; + } +vld1_s16_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32")] + fn vld1_s32_x2_(a: *const i32) -> int32x2x2_t; + } +vld1_s32_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0i64")] + fn vld1_s64_x2_(a: *const i64) -> int64x1x2_t; + } +vld1_s64_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn 
vld1q_s8_x2(a: *const i8) -> int8x16x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8")] + fn vld1q_s8_x2_(a: *const i8) -> int8x16x2_t; + } +vld1q_s8_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16")] + fn vld1q_s16_x2_(a: *const i16) -> int16x8x2_t; + } +vld1q_s16_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32")] + fn vld1q_s32_x2_(a: *const i32) -> int32x4x2_t; + } +vld1q_s32_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64")] + fn vld1q_s64_x2_(a: *const i64) -> int64x2x2_t; + } +vld1q_s64_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8")] + fn vld1_s8_x3_(a: *const i8) -> int8x8x3_t; + } +vld1_s8_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16")] + fn vld1_s16_x3_(a: *const i16) -> int16x4x3_t; + } +vld1_s16_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32")] + fn vld1_s32_x3_(a: *const i32) -> int32x2x3_t; + } +vld1_s32_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0i64")] + fn vld1_s64_x3_(a: *const i64) -> int64x1x3_t; + } +vld1_s64_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8")] + fn vld1q_s8_x3_(a: *const i8) -> int8x16x3_t; + } +vld1q_s8_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16")] + fn vld1q_s16_x3_(a: *const i16) -> int16x8x3_t; + } +vld1q_s16_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")] + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.neon.ld1x3.v4i32.p0i32")] + fn vld1q_s32_x3_(a: *const i32) -> int32x4x3_t; + } +vld1q_s32_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64")] + fn vld1q_s64_x3_(a: *const i64) -> int64x2x3_t; + } +vld1q_s64_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8")] + fn vld1_s8_x4_(a: *const i8) -> int8x8x4_t; + } +vld1_s8_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16")] + fn vld1_s16_x4_(a: *const i16) -> int16x4x4_t; + } +vld1_s16_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32")] + fn vld1_s32_x4_(a: *const i32) -> int32x2x4_t; + } +vld1_s32_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0i64")] + fn vld1_s64_x4_(a: *const i64) -> int64x1x4_t; + } +vld1_s64_x4_(a) +} + +/// Load 
multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8")] + fn vld1q_s8_x4_(a: *const i8) -> int8x16x4_t; + } +vld1q_s8_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16")] + fn vld1q_s16_x4_(a: *const i16) -> int16x8x4_t; + } +vld1q_s16_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32")] + fn vld1q_s32_x4_(a: *const i32) -> int32x4x4_t; + } +vld1q_s32_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64")] + fn vld1q_s64_x4_(a: *const i64) -> int64x2x4_t; + } +vld1q_s64_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { + transmute(vld1_s8_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u16_x2(a: *const u16) -> 
uint16x4x2_t { + transmute(vld1_s16_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { + transmute(vld1_s32_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { + transmute(vld1_s64_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { + transmute(vld1q_s8_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { + transmute(vld1q_s16_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { + transmute(vld1q_s32_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { + transmute(vld1q_s64_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { + transmute(vld1_s8_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { + transmute(vld1_s16_x3(transmute(a))) +} + +/// Load multiple single-element 
structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { + transmute(vld1_s32_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { + transmute(vld1_s64_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { + transmute(vld1q_s8_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { + transmute(vld1q_s16_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { + transmute(vld1q_s32_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { + transmute(vld1q_s64_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { + transmute(vld1_s8_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { + transmute(vld1_s16_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] 
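/* Editorial note: the unsigned and polynomial `vld1*_x*` variants in this
   stretch bind no new LLVM intrinsics; each forwards to the corresponding
   signed load and `transmute`s the result, which is sound because the lane
   types share size and layout. A usage sketch for the function defined just
   below (hypothetical buffer contents, assuming a NEON target):

       let buf: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
       // four registers of two u32 lanes each: [1,2], [3,4], [5,6], [7,8]
       let regs: uint32x2x4_t = vld1_u32_x4(buf.as_ptr());
*/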
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { + transmute(vld1_s32_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { + transmute(vld1_s64_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { + transmute(vld1q_s8_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { + transmute(vld1q_s16_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { + transmute(vld1q_s32_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { + transmute(vld1q_s64_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { + transmute(vld1_s8_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { + transmute(vld1_s8_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { + transmute(vld1_s8_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { + transmute(vld1q_s8_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { + transmute(vld1q_s8_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { + transmute(vld1q_s8_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { + transmute(vld1_s16_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { + transmute(vld1_s16_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { + transmute(vld1_s16_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { + transmute(vld1q_s16_x2(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub 
unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { + transmute(vld1q_s16_x3(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { + transmute(vld1q_s16_x4(transmute(a))) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32")] + fn vld1_f32_x2_(a: *const f32) -> float32x2x2_t; + } +vld1_f32_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32")] + fn vld1q_f32_x2_(a: *const f32) -> float32x4x2_t; + } +vld1q_f32_x2_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32")] + fn vld1_f32_x3_(a: *const f32) -> float32x2x3_t; + } +vld1_f32_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32")] + fn vld1q_f32_x3_(a: *const f32) -> float32x4x3_t; + } +vld1q_f32_x3_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32")] + fn vld1_f32_x4_(a: *const f32) -> float32x2x4_t; + } +vld1_f32_x4_(a) +} + +/// Load multiple single-element structures to one, two, three, or four registers +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] +pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { + #[allow(improper_ctypes)] + extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32")] + fn vld1q_f32_x4_(a: *const f32) -> float32x4x4_t; + } +vld1q_f32_x4_(a) +} + /// Multiply #[inline] #[target_feature(enable = "neon")] @@ -5986,7 +6826,7 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmul.v8i8")] fn vmul_p8_(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; @@ -6002,7 +6842,7 @@ vmul_p8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmul.v16i8")] fn vmulq_p8_(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; @@ -6378,7 +7218,7 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smull.v8i8")] fn vmull_s8_(a: int8x8_t, b: int8x8_t) -> int16x8_t; @@ -6394,7 +7234,7 @@ vmull_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smull.v4i16")] fn vmull_s16_(a: int16x4_t, b: int16x4_t) -> int32x4_t; @@ -6410,7 +7250,7 @@ vmull_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smull.v2i32")] fn vmull_s32_(a: int32x2_t, b: int32x2_t) -> int64x2_t; @@ 
-6426,7 +7266,7 @@ vmull_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umull.v8i8")] fn vmull_u8_(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; @@ -6442,7 +7282,7 @@ vmull_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umull.v4i16")] fn vmull_u16_(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; @@ -6458,7 +7298,7 @@ vmull_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umull.v2i32")] fn vmull_u32_(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; @@ -6474,7 +7314,7 @@ vmull_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmull))] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmull.v8i8")] fn vmull_p8_(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; @@ -6626,7 +7466,7 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")] fn vfma_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; @@ -6642,7 +7482,7 @@ vfma_f32_(b, c, a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")] fn vfmaq_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; @@ -7032,7 +7872,7 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v8i8")] fn vhsub_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -7048,7 +7888,7 @@ vhsub_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { 
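/* Editorial note: the `vhsub*` family computes the element-wise halving
   difference, (a - b) >> 1, with the subtraction done at full precision so
   the intermediate cannot wrap. Sketch with hypothetical u8 lanes:
   for a = [10, 7, ...] and b = [4, 2, ...], vhsubq_u8(a, b) yields
   [3, 2, ...], since (10 - 4) >> 1 == 3 and (7 - 2) >> 1 == 2 (the shift
   truncates the low bit).
*/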
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v16i8")] fn vhsubq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -7064,7 +7904,7 @@ vhsubq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v4i16")] fn vhsub_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -7080,7 +7920,7 @@ vhsub_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v8i16")] fn vhsubq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -7096,7 +7936,7 @@ vhsubq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v2i32")] fn vhsub_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -7112,7 +7952,7 @@ vhsub_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v4i32")] fn vhsubq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -7128,7 +7968,7 @@ vhsubq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v8i8")] fn vhsub_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -7144,7 +7984,7 @@ vhsub_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v16i8")] fn vhsubq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -7160,7 +8000,7 @@ vhsubq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v4i16")] fn vhsub_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -7176,7 +8016,7 @@ vhsub_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] pub unsafe fn vhsubq_s16(a: int16x8_t, 
b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v8i16")] fn vhsubq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -7192,7 +8032,7 @@ vhsubq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v2i32")] fn vhsub_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -7208,7 +8048,7 @@ vhsub_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v4i32")] fn vhsubq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -7356,7 +8196,7 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v8i8")] fn vmax_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -7372,7 +8212,7 @@ vmax_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v16i8")] fn vmaxq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -7388,7 +8228,7 @@ vmaxq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v4i16")] fn vmax_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -7404,7 +8244,7 @@ vmax_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v8i16")] fn vmaxq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -7420,7 +8260,7 @@ vmaxq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v2i32")] fn vmax_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -7436,7 +8276,7 @@ vmax_s32_(a, b) 
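/* Editorial note: `vmax*` selects the element-wise maximum (smax/umax/fmax);
   e.g. vmaxq_s32 on hypothetical lanes [-1, 7, 0, 9] and [3, 5, -2, 9]
   yields [3, 7, 0, 9]. For the float variants further on, note that
   `vmaxnm*` lowers to `fmaxnm`, which follows the IEEE 754 maxNum rule of
   preferring a number over a quiet NaN, whereas plain `fmax` propagates NaN.
*/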
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
 pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v4i32")]
         fn vmaxq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
@@ -7452,7 +8292,7 @@ vmaxq_s32_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
 pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v8i8")]
         fn vmax_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
@@ -7468,7 +8308,7 @@ vmax_u8_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
 pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v16i8")]
         fn vmaxq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
@@ -7484,7 +8324,7 @@ vmaxq_u8_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
 pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v4i16")]
         fn vmax_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
@@ -7500,7 +8340,7 @@ vmax_u16_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
 pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v8i16")]
         fn vmaxq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
@@ -7516,7 +8356,7 @@ vmaxq_u16_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
 pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v2i32")]
         fn vmax_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
@@ -7532,7 +8372,7 @@ vmax_u32_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
 pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v4i32")]
         fn vmaxq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
@@ -7548,7 +8388,7 @@ vmaxq_u32_(a, b)
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))]
 pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
-    extern "C" {
+    extern "unadjusted" {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")]
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v2f32")]
         fn vmax_f32_(a: float32x2_t, b: 
float32x2_t) -> float32x2_t; @@ -7564,7 +8404,7 @@ vmax_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v4f32")] fn vmaxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; @@ -7580,7 +8420,7 @@ vmaxq_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v2f32")] fn vmaxnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t; @@ -7596,7 +8436,7 @@ vmaxnm_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v4f32")] fn vmaxnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; @@ -7612,7 +8452,7 @@ vmaxnmq_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v8i8")] fn vmin_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -7628,7 +8468,7 @@ vmin_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v16i8")] fn vminq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -7644,7 +8484,7 @@ vminq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v4i16")] fn vmin_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -7660,7 +8500,7 @@ vmin_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v8i16")] fn vminq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -7676,7 +8516,7 @@ vminq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.neon.smin.v2i32")] fn vmin_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -7692,7 +8532,7 @@ vmin_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v4i32")] fn vminq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -7708,7 +8548,7 @@ vminq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v8i8")] fn vmin_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; @@ -7724,7 +8564,7 @@ vmin_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v16i8")] fn vminq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; @@ -7740,7 +8580,7 @@ vminq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v4i16")] fn vmin_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; @@ -7756,7 +8596,7 @@ vmin_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v8i16")] fn vminq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; @@ -7772,7 +8612,7 @@ vminq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v2i32")] fn vmin_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; @@ -7788,7 +8628,7 @@ vmin_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v4i32")] fn vminq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; @@ -7804,7 +8644,7 @@ vminq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", 
link_name = "llvm.arm.neon.vmins.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v2f32")] fn vmin_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t; @@ -7820,7 +8660,7 @@ vmin_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v4f32")] fn vminq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; @@ -7836,7 +8676,7 @@ vminq_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v2f32")] fn vminnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t; @@ -7852,7 +8692,7 @@ vminnm_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v4f32")] fn vminnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; @@ -7868,7 +8708,7 @@ vminnmq_f32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmull.v4i32")] fn vqdmull_s16_(a: int16x4_t, b: int16x4_t) -> int32x4_t; @@ -7884,7 +8724,7 @@ vqdmull_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmull.v2i64")] fn vqdmull_s32_(a: int32x2_t, b: int32x2_t) -> int64x2_t; @@ -8074,7 +8914,7 @@ pub unsafe fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v4i16")] fn vqdmulh_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -8090,7 +8930,7 @@ vqdmulh_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v8i16")] fn vqdmulhq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -8106,7 +8946,7 @@ vqdmulhq_s16_(a, b) #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(sqdmulh))] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v2i32")] fn vqdmulh_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -8122,7 +8962,7 @@ vqdmulh_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v4i32")] fn vqdmulhq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -8182,7 +9022,7 @@ pub unsafe fn vqdmulhq_nq_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtn.v8i8")] fn vqmovn_s16_(a: int16x8_t) -> int8x8_t; @@ -8198,7 +9038,7 @@ vqmovn_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtn.v4i16")] fn vqmovn_s32_(a: int32x4_t) -> int16x4_t; @@ -8214,7 +9054,7 @@ vqmovn_s32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtn.v2i32")] fn vqmovn_s64_(a: int64x2_t) -> int32x2_t; @@ -8230,7 +9070,7 @@ vqmovn_s64_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqxtn.v8i8")] fn vqmovn_u16_(a: uint16x8_t) -> uint8x8_t; @@ -8246,7 +9086,7 @@ vqmovn_u16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqxtn.v4i16")] fn vqmovn_u32_(a: uint32x4_t) -> uint16x4_t; @@ -8262,7 +9102,7 @@ vqmovn_u32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqxtn.v2i32")] fn vqmovn_u64_(a: uint64x2_t) -> uint32x2_t; @@ -8278,7 +9118,7 @@ vqmovn_u64_(a) #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(sqxtun))] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtun.v8i8")] fn vqmovun_s16_(a: int16x8_t) -> uint8x8_t; @@ -8294,7 +9134,7 @@ vqmovun_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtun.v4i16")] fn vqmovun_s32_(a: int32x4_t) -> uint16x4_t; @@ -8310,7 +9150,7 @@ vqmovun_s32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtun.v2i32")] fn vqmovun_s64_(a: int64x2_t) -> uint32x2_t; @@ -8326,7 +9166,7 @@ vqmovun_s64_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v4i16")] fn vqrdmulh_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -8342,7 +9182,7 @@ vqrdmulh_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v8i16")] fn vqrdmulhq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -8358,7 +9198,7 @@ vqrdmulhq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v2i32")] fn vqrdmulh_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -8374,7 +9214,7 @@ vqrdmulh_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v4i32")] fn vqrdmulhq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -8806,7 +9646,7 @@ pub unsafe fn vqrdmlshq_laneq_s32(a: int32x4_t, b: int32x4_t, c #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v8i8")] fn 
vqrshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -8822,7 +9662,7 @@ vqrshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v16i8")] fn vqrshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -8838,7 +9678,7 @@ vqrshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v4i16")] fn vqrshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -8854,7 +9694,7 @@ vqrshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v8i16")] fn vqrshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -8870,7 +9710,7 @@ vqrshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v2i32")] fn vqrshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -8886,7 +9726,7 @@ vqrshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v4i32")] fn vqrshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -8902,7 +9742,7 @@ vqrshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v1i64")] fn vqrshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t; @@ -8918,7 +9758,7 @@ vqrshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v2i64")] fn vqrshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; @@ -8934,7 +9774,7 @@ vqrshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v8i8")] fn vqrshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; @@ -8950,7 +9790,7 @@ vqrshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v16i8")] fn vqrshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; @@ -8966,7 +9806,7 @@ vqrshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v4i16")] fn vqrshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; @@ -8982,7 +9822,7 @@ vqrshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v8i16")] fn vqrshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; @@ -8998,7 +9838,7 @@ vqrshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v2i32")] fn vqrshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; @@ -9014,7 +9854,7 @@ vqrshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v4i32")] fn vqrshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; @@ -9030,7 +9870,7 @@ vqrshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v1i64")] fn vqrshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; @@ -9046,7 +9886,7 @@ vqrshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v2i64")] fn vqrshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; @@ -9063,7 +9903,7 @@ vqrshlq_u64_(a, b) pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { 
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
fn vqrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
@@ -9079,7 +9919,7 @@ vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v8i8")]
fn vqrshrn_n_s16_(a: int16x8_t, n: i32) -> int8x8_t;
}
@@ -9095,7 +9935,7 @@ vqrshrn_n_s16_(a, N)
pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
fn vqrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
@@ -9111,7 +9951,7 @@ vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v4i16")]
fn vqrshrn_n_s32_(a: int32x4_t, n: i32) -> int16x4_t;
}
@@ -9127,7 +9967,7 @@ vqrshrn_n_s32_(a, N)
pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
fn vqrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
@@ -9143,7 +9983,7 @@ vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v2i32")]
fn vqrshrn_n_s64_(a: int64x2_t, n: i32) -> int32x2_t;
}
@@ -9159,7 +9999,7 @@ vqrshrn_n_s64_(a, N)
pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")]
fn vqrshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
@@ -9175,7 +10015,7 @@ vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u
pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v8i8")]
fn vqrshrn_n_u16_(a: uint16x8_t, n: i32) -> uint8x8_t;
}
@@ -9191,7 +10031,7 @@ vqrshrn_n_u16_(a, N)
pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")]
fn vqrshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
@@ -9207,7 +10047,7 @@ vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
"unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v4i16")] fn vqrshrn_n_u32_(a: uint32x4_t, n: i32) -> uint16x4_t; } @@ -9223,7 +10063,7 @@ vqrshrn_n_u32_(a, N) pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] fn vqrshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; } @@ -9239,7 +10079,7 @@ vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64)) pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v2i32")] fn vqrshrn_n_u64_(a: uint64x2_t, n: i32) -> uint32x2_t; } @@ -9255,7 +10095,7 @@ vqrshrn_n_u64_(a, N) pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] fn vqrshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } @@ -9271,7 +10111,7 @@ vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v8i8")] fn vqrshrun_n_s16_(a: int16x8_t, n: i32) -> uint8x8_t; } @@ -9287,7 +10127,7 @@ vqrshrun_n_s16_(a, N) pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] fn vqrshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } @@ -9303,7 +10143,7 @@ vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v4i16")] fn vqrshrun_n_s32_(a: int32x4_t, n: i32) -> uint16x4_t; } @@ -9319,7 +10159,7 @@ vqrshrun_n_s32_(a, N) pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] fn vqrshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } @@ -9335,7 +10175,7 @@ vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64)) pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v2i32")] fn vqrshrun_n_s64_(a: int64x2_t, n: i32) -> uint32x2_t; } @@ -9350,7 +10190,7 @@ vqrshrun_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.neon.sqshl.v8i8")] fn vqshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -9366,7 +10206,7 @@ vqshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v16i8")] fn vqshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -9382,7 +10222,7 @@ vqshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v4i16")] fn vqshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -9398,7 +10238,7 @@ vqshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v8i16")] fn vqshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -9414,7 +10254,7 @@ vqshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v2i32")] fn vqshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -9430,7 +10270,7 @@ vqshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v4i32")] fn vqshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -9446,7 +10286,7 @@ vqshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v1i64")] fn vqshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t; @@ -9462,7 +10302,7 @@ vqshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v2i64")] fn vqshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; @@ -9478,7 +10318,7 @@ vqshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v8i8")] fn vqshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; @@ -9494,7 +10334,7 @@ vqshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v16i8")] fn vqshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; @@ -9510,7 +10350,7 @@ vqshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v4i16")] fn vqshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; @@ -9526,7 +10366,7 @@ vqshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v8i16")] fn vqshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; @@ -9542,7 +10382,7 @@ vqshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v2i32")] fn vqshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; @@ -9558,7 +10398,7 @@ vqshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v4i32")] fn vqshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; @@ -9574,7 +10414,7 @@ vqshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v1i64")] fn vqshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; @@ -9590,7 +10430,7 @@ vqshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v2i64")] fn vqshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; @@ -9799,7 +10639,7 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { pub unsafe fn vqshrn_n_s16(a: 
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
fn vqshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
@@ -9815,7 +10655,7 @@ vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v8i8")]
fn vqshrn_n_s16_(a: int16x8_t, n: i32) -> int8x8_t;
}
@@ -9831,7 +10671,7 @@ vqshrn_n_s16_(a, N)
pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
fn vqshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
@@ -9847,7 +10687,7 @@ vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v4i16")]
fn vqshrn_n_s32_(a: int32x4_t, n: i32) -> int16x4_t;
}
@@ -9863,7 +10703,7 @@ vqshrn_n_s32_(a, N)
pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
fn vqshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
@@ -9879,7 +10719,7 @@ vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v2i32")]
fn vqshrn_n_s64_(a: int64x2_t, n: i32) -> int32x2_t;
}
@@ -9895,7 +10735,7 @@ vqshrn_n_s64_(a, N)
pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")]
fn vqshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
@@ -9911,7 +10751,7 @@ vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u1
pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v8i8")]
fn vqshrn_n_u16_(a: uint16x8_t, n: i32) -> uint8x8_t;
}
@@ -9927,7 +10767,7 @@ vqshrn_n_u16_(a, N)
pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")]
fn vqshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
@@ -9943,7 +10783,7 @@ vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
"unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v4i16")] fn vqshrn_n_u32_(a: uint32x4_t, n: i32) -> uint16x4_t; } @@ -9959,7 +10799,7 @@ vqshrn_n_u32_(a, N) pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] fn vqshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; } @@ -9975,7 +10815,7 @@ vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64)) pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v2i32")] fn vqshrn_n_u64_(a: uint64x2_t, n: i32) -> uint32x2_t; } @@ -9991,7 +10831,7 @@ vqshrn_n_u64_(a, N) pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] fn vqshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } @@ -10007,7 +10847,7 @@ vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1 pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v8i8")] fn vqshrun_n_s16_(a: int16x8_t, n: i32) -> uint8x8_t; } @@ -10023,7 +10863,7 @@ vqshrun_n_s16_(a, N) pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] fn vqshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } @@ -10039,7 +10879,7 @@ vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32)) pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v4i16")] fn vqshrun_n_s32_(a: int32x4_t, n: i32) -> uint16x4_t; } @@ -10055,7 +10895,7 @@ vqshrun_n_s32_(a, N) pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] fn vqshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } @@ -10071,7 +10911,7 @@ vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64)) pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v2i32")] fn vqshrun_n_s64_(a: int64x2_t, n: i32) -> uint32x2_t; } @@ -10086,7 +10926,7 @@ vqshrun_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.neon.frsqrte.v2f32")] fn vrsqrte_f32_(a: float32x2_t) -> float32x2_t; @@ -10102,7 +10942,7 @@ vrsqrte_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v4f32")] fn vrsqrteq_f32_(a: float32x4_t) -> float32x4_t; @@ -10118,7 +10958,7 @@ vrsqrteq_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v2f32")] fn vrecpe_f32_(a: float32x2_t) -> float32x2_t; @@ -10134,7 +10974,7 @@ vrecpe_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v4f32")] fn vrecpeq_f32_(a: float32x4_t) -> float32x4_t; @@ -12350,7 +13190,7 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v8i8")] fn vrshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t; @@ -12366,7 +13206,7 @@ vrshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v16i8")] fn vrshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t; @@ -12382,7 +13222,7 @@ vrshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v4i16")] fn vrshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t; @@ -12398,7 +13238,7 @@ vrshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v8i16")] fn vrshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t; @@ -12414,7 +13254,7 @@ vrshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", 
link_name = "llvm.arm.neon.vrshifts.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v2i32")] fn vrshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t; @@ -12430,7 +13270,7 @@ vrshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v4i32")] fn vrshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t; @@ -12446,7 +13286,7 @@ vrshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v1i64")] fn vrshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t; @@ -12462,7 +13302,7 @@ vrshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v2i64")] fn vrshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; @@ -12478,7 +13318,7 @@ vrshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v8i8")] fn vrshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; @@ -12494,7 +13334,7 @@ vrshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v16i8")] fn vrshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; @@ -12510,7 +13350,7 @@ vrshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v4i16")] fn vrshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; @@ -12526,7 +13366,7 @@ vrshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v8i16")] fn vrshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; @@ -12542,7 +13382,7 @@ vrshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] pub unsafe fn vrshl_u32(a: uint32x2_t, b: 
int32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v2i32")]
fn vrshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
@@ -12558,7 +13398,7 @@ vrshl_u32_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v4i32")]
fn vrshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
@@ -12574,7 +13414,7 @@ vrshlq_u32_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v1i64")]
fn vrshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
@@ -12590,7 +13430,7 @@ vrshl_u64_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v2i64")]
fn vrshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
@@ -12799,7 +13639,7 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t {
pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
fn vrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
@@ -12815,7 +13655,7 @@ vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v8i8")]
fn vrshrn_n_s16_(a: int16x8_t, n: i32) -> int8x8_t;
}
@@ -12831,7 +13671,7 @@ vrshrn_n_s16_(a, N)
pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
fn vrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
@@ -12847,7 +13687,7 @@ vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v4i16")]
fn vrshrn_n_s32_(a: int32x4_t, n: i32) -> int16x4_t;
}
@@ -12863,7 +13703,7 @@ vrshrn_n_s32_(a, N)
pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
fn vrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
@@ -12879,7 +13719,7 @@ vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v2i32")]
fn vrshrn_n_s64_(a: int64x2_t, n: i32) -> int32x2_t;
}
@@ -13410,7 +14250,7 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v8i8")]
fn vshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
@@ -13426,7 +14266,7 @@ vshl_s8_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v16i8")]
fn vshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
@@ -13442,7 +14282,7 @@ vshlq_s8_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v4i16")]
fn vshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
@@ -13458,7 +14298,7 @@ vshl_s16_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v8i16")]
fn vshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
@@ -13474,7 +14314,7 @@ vshlq_s16_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v2i32")]
fn vshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
@@ -13490,7 +14330,7 @@ vshl_s32_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v4i32")]
fn vshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
@@ -13506,7 +14346,7 @@ vshlq_s32_(a, b)
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
- extern "C" {
+ extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name =
"llvm.aarch64.neon.sshl.v1i64")] fn vshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t; @@ -13522,7 +14362,7 @@ vshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v2i64")] fn vshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t; @@ -13538,7 +14378,7 @@ vshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v8i8")] fn vshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; @@ -13554,7 +14394,7 @@ vshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v16i8")] fn vshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; @@ -13570,7 +14410,7 @@ vshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v4i16")] fn vshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; @@ -13586,7 +14426,7 @@ vshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v8i16")] fn vshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; @@ -13602,7 +14442,7 @@ vshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v2i32")] fn vshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; @@ -13618,7 +14458,7 @@ vshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v4i32")] fn vshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; @@ -13634,7 +14474,7 @@ vshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vshiftu.v1i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v1i64")] fn vshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; @@ -13650,7 +14490,7 @@ vshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v2i64")] fn vshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; @@ -14455,7 +15295,7 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v8i8")] fn vqabs_s8_(a: int8x8_t) -> int8x8_t; @@ -14471,7 +15311,7 @@ vqabs_s8_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v16i8")] fn vqabsq_s8_(a: int8x16_t) -> int8x16_t; @@ -14487,7 +15327,7 @@ vqabsq_s8_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v4i16")] fn vqabs_s16_(a: int16x4_t) -> int16x4_t; @@ -14503,7 +15343,7 @@ vqabs_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v8i16")] fn vqabsq_s16_(a: int16x8_t) -> int16x8_t; @@ -14519,7 +15359,7 @@ vqabsq_s16_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v2i32")] fn vqabs_s32_(a: int32x2_t) -> int32x2_t; @@ -14535,7 +15375,7 @@ vqabs_s32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] - extern "C" { + extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v4i32")] fn vqabsq_s32_(a: int32x4_t) -> int32x4_t; @@ -18961,6 +19801,534 @@ mod test { assert_eq!(r, e); } + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s8_x2() { + let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i8x8; 2] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [i8x8; 2] = 
transmute(vld1_s8_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s16_x2() { + let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [i16x4; 2] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8)]; + let r: [i16x4; 2] = transmute(vld1_s16_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s32_x2() { + let a: [i32; 5] = [0, 1, 2, 3, 4]; + let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(3, 4)]; + let r: [i32x2; 2] = transmute(vld1_s32_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s64_x2() { + let a: [i64; 3] = [0, 1, 2]; + let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)]; + let r: [i64x1; 2] = transmute(vld1_s64_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s8_x2() { + let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i8x16; 2] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i8x16; 2] = transmute(vld1q_s8_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s16_x2() { + let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i16x8; 2] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [i16x8; 2] = transmute(vld1q_s16_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s32_x2() { + let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [i32x4; 2] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8)]; + let r: [i32x4; 2] = transmute(vld1q_s32_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s64_x2() { + let a: [i64; 5] = [0, 1, 2, 3, 4]; + let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(3, 4)]; + let r: [i64x2; 2] = transmute(vld1q_s64_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s8_x3() { + let a: [i8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; + let e: [i8x8; 3] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let r: [i8x8; 3] = transmute(vld1_s8_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s16_x3() { + let a: [i16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let e: [i16x4; 3] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12)]; + let r: [i16x4; 3] = transmute(vld1_s16_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s32_x3() { + let a: [i32; 7] = [0, 1, 2, 3, 4, 5, 6]; + let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(3, 4), i32x2::new(5, 6)]; + let r: [i32x2; 3] = transmute(vld1_s32_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s64_x3() { + let a: [i64; 4] = [0, 1, 2, 3]; + let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(3)]; + let r: [i64x1; 3] = transmute(vld1_s64_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s8_x3() { + let a: [i8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i8x16; 3] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [i8x16; 3] = transmute(vld1q_s8_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s16_x3() { + let a: [i16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; + let e: [i16x8; 3] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let r: [i16x8; 3] = transmute(vld1q_s16_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s32_x3() { + let a: [i32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let e: [i32x4; 3] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8), i32x4::new(9, 10, 11, 12)]; + let r: [i32x4; 3] = transmute(vld1q_s32_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s64_x3() { + let a: [i64; 7] = [0, 1, 2, 3, 4, 5, 6]; + let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6)]; + let r: [i64x2; 3] = transmute(vld1q_s64_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s8_x4() { + let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i8x8; 4] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24), i8x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i8x8; 4] = transmute(vld1_s8_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s16_x4() { + let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i16x4; 4] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12), i16x4::new(13, 14, 15, 16)]; + let r: [i16x4; 4] = transmute(vld1_s16_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s32_x4() { + let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(3, 4), i32x2::new(5, 6), i32x2::new(7, 8)]; + let r: [i32x2; 4] = transmute(vld1_s32_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_s64_x4() { + let a: [i64; 5] = [0, 1, 2, 3, 4]; + let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(3), i64x1::new(4)]; + let r: [i64x1; 4] = transmute(vld1_s64_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s8_x4() { + let a: [i8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i8x16; 4] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i8x16; 4] = 
transmute(vld1q_s8_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s16_x4() { + let a: [i16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i16x8; 4] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24), i16x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i16x8; 4] = transmute(vld1q_s16_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s32_x4() { + let a: [i32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i32x4; 4] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8), i32x4::new(9, 10, 11, 12), i32x4::new(13, 14, 15, 16)]; + let r: [i32x4; 4] = transmute(vld1q_s32_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_s64_x4() { + let a: [i64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6), i64x2::new(7, 8)]; + let r: [i64x2; 4] = transmute(vld1q_s64_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u8_x2() { + let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [u8x8; 2] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [u8x8; 2] = transmute(vld1_u8_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u16_x2() { + let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [u16x4; 2] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8)]; + let r: [u16x4; 2] = transmute(vld1_u16_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u32_x2() { + let a: [u32; 5] = [0, 1, 2, 3, 4]; + let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(3, 4)]; + let r: [u32x2; 2] = transmute(vld1_u32_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u64_x2() { + let a: [u64; 3] = [0, 1, 2]; + let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)]; + let r: [u64x1; 2] = transmute(vld1_u64_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u8_x2() { + let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [u8x16; 2] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [u8x16; 2] = transmute(vld1q_u8_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u16_x2() { + let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [u16x8; 2] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [u16x8; 2] = transmute(vld1q_u16_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u32_x2() { + let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [u32x4; 2] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8)]; + let r: [u32x4; 2] = transmute(vld1q_u32_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u64_x2() { + let a: [u64; 5] = [0, 1, 2, 3, 4]; + let e: [u64x2; 2] = [u64x2::new(1, 2), 
u64x2::new(3, 4)]; + let r: [u64x2; 2] = transmute(vld1q_u64_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u8_x3() { + let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; + let e: [u8x8; 3] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16), u8x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let r: [u8x8; 3] = transmute(vld1_u8_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u16_x3() { + let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let e: [u16x4; 3] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8), u16x4::new(9, 10, 11, 12)]; + let r: [u16x4; 3] = transmute(vld1_u16_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u32_x3() { + let a: [u32; 7] = [0, 1, 2, 3, 4, 5, 6]; + let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(3, 4), u32x2::new(5, 6)]; + let r: [u32x2; 3] = transmute(vld1_u32_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u64_x3() { + let a: [u64; 4] = [0, 1, 2, 3]; + let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(3)]; + let r: [u64x1; 3] = transmute(vld1_u64_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u8_x3() { + let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [u8x16; 3] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [u8x16; 3] = transmute(vld1q_u8_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u16_x3() { + let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; + let e: [u16x8; 3] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16), u16x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let r: [u16x8; 3] = transmute(vld1q_u16_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u32_x3() { + let a: [u32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let e: [u32x4; 3] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8), u32x4::new(9, 10, 11, 12)]; + let r: [u32x4; 3] = transmute(vld1q_u32_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u64_x3() { + let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6]; + let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(3, 4), u64x2::new(5, 6)]; + let r: [u64x2; 3] = transmute(vld1q_u64_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u8_x4() { + let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [u8x8; 4] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16), u8x8::new(17, 18, 19, 20, 21, 22, 23, 24), u8x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [u8x8; 4] = transmute(vld1_u8_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u16_x4() { + let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [u16x4; 4] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8), u16x4::new(9, 10, 11, 12), u16x4::new(13, 14, 15, 16)]; + let r: [u16x4; 4] = transmute(vld1_u16_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u32_x4() { + let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(3, 4), u32x2::new(5, 6), u32x2::new(7, 8)]; + let r: [u32x2; 4] = transmute(vld1_u32_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_u64_x4() { + let a: [u64; 5] = [0, 1, 2, 3, 4]; + let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(3), u64x1::new(4)]; + let r: [u64x1; 4] = transmute(vld1_u64_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u8_x4() { + let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [u8x16; 4] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [u8x16; 4] = transmute(vld1q_u8_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u16_x4() { + let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [u16x8; 4] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16), u16x8::new(17, 18, 19, 20, 21, 22, 23, 24), u16x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [u16x8; 4] = transmute(vld1q_u16_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u32_x4() { + let a: [u32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [u32x4; 4] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8), u32x4::new(9, 10, 11, 12), u32x4::new(13, 14, 15, 16)]; + let r: [u32x4; 4] = transmute(vld1q_u32_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_u64_x4() { + let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(3, 4), u64x2::new(5, 6), u64x2::new(7, 8)]; + let r: [u64x2; 4] = transmute(vld1q_u64_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_p8_x2() { + let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i8x8; 2] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [i8x8; 2] = transmute(vld1_p8_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_p8_x3() { + let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; + let e: [i8x8; 3] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let r: [i8x8; 3] = transmute(vld1_p8_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_p8_x4() { + let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i8x8; 4] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24), i8x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i8x8; 4] = transmute(vld1_p8_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_p8_x2() { + let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i8x16; 2] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i8x16; 2] = transmute(vld1q_p8_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_p8_x3() { + let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i8x16; 3] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [i8x16; 3] = transmute(vld1q_p8_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_p8_x4() { + let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i8x16; 4] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i8x16; 4] = transmute(vld1q_p8_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_p16_x2() { + let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let e: [i16x4; 2] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8)]; + let r: [i16x4; 2] = transmute(vld1_p16_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_p16_x3() { + let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let e: [i16x4; 3] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12)]; + let r: [i16x4; 3] = transmute(vld1_p16_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_p16_x4() { + let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i16x4; 4] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12), i16x4::new(13, 14, 15, 16)]; + let r: [i16x4; 4] = transmute(vld1_p16_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_p16_x2() { + let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let e: [i16x8; 2] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let r: [i16x8; 2] = transmute(vld1q_p16_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_p16_x3() { + let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; + let e: [i16x8; 3] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let r: [i16x8; 3] = transmute(vld1q_p16_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_p16_x4() { + let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; + let e: [i16x8; 4] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24), i16x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let r: [i16x8; 4] = transmute(vld1q_p16_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_f32_x2() { + let a: [f32; 5] = [0., 1., 2., 3., 4.]; + let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(3., 4.)]; + let r: [f32x2; 2] = transmute(vld1_f32_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_f32_x2() { + let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; + let e: [f32x4; 2] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.)]; + let r: [f32x4; 2] = transmute(vld1q_f32_x2(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_f32_x3() { + let a: [f32; 7] = [0., 1., 2., 3., 4., 5., 6.]; + let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(3., 4.), f32x2::new(5., 6.)]; + let r: [f32x2; 3] = transmute(vld1_f32_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_f32_x3() { + let a: [f32; 13] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let e: [f32x4; 3] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.), f32x4::new(9., 10., 11., 12.)]; + let r: [f32x4; 3] = transmute(vld1q_f32_x3(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1_f32_x4() { + let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; + let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(3., 4.), f32x2::new(5., 6.), f32x2::new(7., 8.)]; + let r: [f32x2; 4] = transmute(vld1_f32_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + + #[simd_test(enable = "neon")] + unsafe fn test_vld1q_f32_x4() { + let a: [f32; 17] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]; + let e: [f32x4; 4] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.), f32x4::new(9., 10., 11., 12.), f32x4::new(13., 14., 15., 16.)]; + let r: [f32x4; 4] = transmute(vld1q_f32_x4(a[1..].as_ptr())); + assert_eq!(r, e); + } + #[simd_test(enable = "neon")] unsafe fn test_vmul_s8() { let a: i8x8 = i8x8::new(1, 2, 1, 2, 1, 2, 1, 2); diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index 9ed9f77aa775..369bf07e1828 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -92,6 +92,16 @@ pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t); #[derive(Copy, Clone)] pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t); +/// ARM-specific type containing two `int8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t); +/// ARM-specific type containing three `int8x16_t` vectors. 
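+// Illustrative usage of the new multi-register loads (a minimal sketch, not
+// generated code: `buf` is a hypothetical `[i8; 32]`, and the intrinsic is
+// `unsafe` and gated on the `neon` target feature). The x2/x3/x4 tuple structs
+// defined here are the return types of the vld1*_x* intrinsics tested above:
+//     let v: int8x16x2_t = unsafe { vld1q_s8_x2(buf.as_ptr()) };
+//     let (lo, hi) = (v.0, v.1); // two int8x16_t registers, loaded back-to-back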
+#[derive(Copy, Clone)] +pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t); +/// ARM-specific type containing four `int8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t); + /// ARM-specific type containing two `uint8x8_t` vectors. #[derive(Copy, Clone)] pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t); @@ -102,6 +112,21 @@ pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); #[derive(Copy, Clone)] pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); +/// ARM-specific type containing two `uint8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t); +/// ARM-specific type containing three `uint8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t); +/// ARM-specific type containing four `uint8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x16x4_t( + pub uint8x16_t, + pub uint8x16_t, + pub uint8x16_t, + pub uint8x16_t, +); + /// ARM-specific type containing two `poly8x8_t` vectors. #[derive(Copy, Clone)] pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t); @@ -112,8 +137,233 @@ pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); #[derive(Copy, Clone)] pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); +/// ARM-specific type containing two `poly8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t); +/// ARM-specific type containing three `poly8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t); +/// ARM-specific type containing four `poly8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x16x4_t( + pub poly8x16_t, + pub poly8x16_t, + pub poly8x16_t, + pub poly8x16_t, +); + +/// ARM-specific type containing two `int16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t); +/// ARM-specific type containing three `int16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t); +/// ARM-specific type containing four `int16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t); + +/// ARM-specific type containing two `int16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t); +/// ARM-specific type containing three `int16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t); +/// ARM-specific type containing four `int16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t); + +/// ARM-specific type containing two `uint16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t); +/// ARM-specific type containing three `uint16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t); +/// ARM-specific type containing four `uint16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct uint16x4x4_t( + pub uint16x4_t, + pub uint16x4_t, + pub uint16x4_t, + pub uint16x4_t, +); + +/// ARM-specific type containing two `uint16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t); +/// ARM-specific type containing three `uint16x8_t` vectors. 
+#[derive(Copy, Clone)] +pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t); +/// ARM-specific type containing four `uint16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct uint16x8x4_t( + pub uint16x8_t, + pub uint16x8_t, + pub uint16x8_t, + pub uint16x8_t, +); + +/// ARM-specific type containing two `poly16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t); +/// ARM-specific type containing three `poly16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t); +/// ARM-specific type containing four `poly16x4_t` vectors. +#[derive(Copy, Clone)] +pub struct poly16x4x4_t( + pub poly16x4_t, + pub poly16x4_t, + pub poly16x4_t, + pub poly16x4_t, +); + +/// ARM-specific type containing two `poly16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t); +/// ARM-specific type containing three `poly16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t); +/// ARM-specific type containing four `poly16x8_t` vectors. +#[derive(Copy, Clone)] +pub struct poly16x8x4_t( + pub poly16x8_t, + pub poly16x8_t, + pub poly16x8_t, + pub poly16x8_t, +); + +/// ARM-specific type containing two `int32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t); +/// ARM-specific type containing three `int32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t); +/// ARM-specific type containing four `int32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t); + +/// ARM-specific type containing two `int32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t); +/// ARM-specific type containing three `int32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t); +/// ARM-specific type containing four `int32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t); + +/// ARM-specific type containing two `uint32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t); +/// ARM-specific type containing three `uint32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t); +/// ARM-specific type containing four `uint32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct uint32x2x4_t( + pub uint32x2_t, + pub uint32x2_t, + pub uint32x2_t, + pub uint32x2_t, +); + +/// ARM-specific type containing two `uint32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t); +/// ARM-specific type containing three `uint32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t); +/// ARM-specific type containing four `uint32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct uint32x4x4_t( + pub uint32x4_t, + pub uint32x4_t, + pub uint32x4_t, + pub uint32x4_t, +); + +/// ARM-specific type containing two `float32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t); +/// ARM-specific type containing three `float32x2_t` vectors. +#[derive(Copy, Clone)] +pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t); +/// ARM-specific type containing four `float32x2_t` vectors. 
+#[derive(Copy, Clone)] +pub struct float32x2x4_t( + pub float32x2_t, + pub float32x2_t, + pub float32x2_t, + pub float32x2_t, +); + +/// ARM-specific type containing two `float32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t); +/// ARM-specific type containing three `float32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t); +/// ARM-specific type containing four `float32x4_t` vectors. +#[derive(Copy, Clone)] +pub struct float32x4x4_t( + pub float32x4_t, + pub float32x4_t, + pub float32x4_t, + pub float32x4_t, +); + +/// ARM-specific type containing two `int64x1_t` vectors. +#[derive(Copy, Clone)] +pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t); +/// ARM-specific type containing three `int64x1_t` vectors. +#[derive(Copy, Clone)] +pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t); +/// ARM-specific type containing four `int64x1_t` vectors. +#[derive(Copy, Clone)] +pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t); + +/// ARM-specific type containing two `int64x2_t` vectors. +#[derive(Copy, Clone)] +pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t); +/// ARM-specific type containing three `int64x2_t` vectors. +#[derive(Copy, Clone)] +pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t); +/// ARM-specific type containing four `int64x2_t` vectors. +#[derive(Copy, Clone)] +pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t); + +/// ARM-specific type containing two `uint64x1_t` vectors. +#[derive(Copy, Clone)] +pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t); +/// ARM-specific type containing three `uint64x1_t` vectors. +#[derive(Copy, Clone)] +pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t); +/// ARM-specific type containing four `uint64x1_t` vectors. +#[derive(Copy, Clone)] +pub struct uint64x1x4_t( + pub uint64x1_t, + pub uint64x1_t, + pub uint64x1_t, + pub uint64x1_t, +); + +/// ARM-specific type containing two `uint64x2_t` vectors. +#[derive(Copy, Clone)] +pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t); +/// ARM-specific type containing three `uint64x2_t` vectors. +#[derive(Copy, Clone)] +pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t); +/// ARM-specific type containing four `uint64x2_t` vectors. 
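+// A similar hedged sketch for the 64-bit variants (`data` is a hypothetical
+// `[i64; 3]`): vld1_s64_x3 reads three consecutive i64 values, so the pointer
+// must be valid for all three reads:
+//     let v: int64x1x3_t = unsafe { vld1_s64_x3(data.as_ptr()) };
+//     let (a, b, c) = (v.0, v.1, v.2); // each field is a single-lane int64x1_t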
+#[derive(Copy, Clone)] +pub struct uint64x2x4_t( + pub uint64x2_t, + pub uint64x2_t, + pub uint64x2_t, + pub uint64x2_t, +); + #[allow(improper_ctypes)] -extern "C" { +extern "unadjusted" { // absolute value (64-bit) #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.abs.v8i8")] @@ -2867,11 +3117,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 1))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 1))] -// Based on the discussion in https://github.com/rust-lang/stdarch/pull/792 -// `mov` seems to be an acceptable intrinsic to compile to -// #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(vmov, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -2882,10 +3128,7 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 0))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, IMM5 = 0))] -// FIXME: no 32bit this seems to be turned into two vmov.32 instructions -// validate correctness +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, 0) @@ -2896,8 +3139,7 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -2908,8 +3150,7 @@ pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.s16", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -2920,8 +3161,7 @@ pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -2932,8 +3172,7 @@ pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 1))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -2944,8 +3183,7 @@ pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 1))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -2956,8 +3194,7 @@ pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.f32", IMM5 = 1))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -2968,8 +3205,7 @@ pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.f32", IMM5 = 1))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 1))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -2980,8 +3216,7 @@ pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 0))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, IMM5 = 0))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, IMM5 as u32) @@ -2992,8 +3227,7 @@ pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 0))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, IMM5 = 0))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3004,8 +3238,7 @@ pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 0))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, IMM5 = 0))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { static_assert!(IMM5 : i32 where IMM5 == 0); simd_extract(v, IMM5 as u32) @@ -3016,8 +3249,7 @@ pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 
{ #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 0))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov, IMM5 = 0))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { static_assert_imm1!(IMM5); simd_extract(v, IMM5 as u32) @@ -3028,8 +3260,7 @@ pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3040,8 +3271,7 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3052,8 +3282,7 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.s16", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3064,8 +3293,7 @@ pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u16", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3076,8 +3304,7 @@ pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.32", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { static_assert_imm2!(IMM5); simd_extract(v, IMM5 as u32) @@ -3088,8 +3315,7 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { 
static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3100,8 +3326,7 @@ pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.s8", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3112,8 +3337,7 @@ pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { static_assert_imm3!(IMM5); simd_extract(v, IMM5 as u32) @@ -3124,8 +3348,7 @@ pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { static_assert_imm4!(IMM5); simd_extract(v, IMM5 as u32) @@ -3136,8 +3359,7 @@ pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.s8", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { static_assert_imm4!(IMM5); simd_extract(v, IMM5 as u32) @@ -3148,8 +3370,7 @@ pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov.u8", IMM5 = 2))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umov, IMM5 = 2))] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { static_assert_imm4!(IMM5); simd_extract(v, IMM5 as u32) @@ -3269,8 +3490,7 @@ pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -3279,8 +3499,7 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { simd_shuffle4!(a, a, [0, 1, 2, 
3]) } @@ -3289,8 +3508,7 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { simd_shuffle2!(a, a, [0, 1]) } @@ -3299,8 +3517,7 @@ pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { int64x1_t(simd_extract(a, 0)) } @@ -3309,8 +3526,7 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -3319,8 +3535,7 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { simd_shuffle4!(a, a, [0, 1, 2, 3]) } @@ -3329,8 +3544,7 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { simd_shuffle2!(a, a, [0, 1]) } @@ -3339,8 +3553,7 @@ pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { uint64x1_t(simd_extract(a, 0)) } @@ -3349,8 +3562,7 @@ pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -3359,8 +3571,7 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("ldr"))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ldr))] +#[cfg_attr(test, assert_instr(nop))] pub unsafe 
diff --git a/library/stdarch/crates/stdarch-gen/neon.spec b/library/stdarch/crates/stdarch-gen/neon.spec
index 5850be482b76..2e2e3cee40b2 100644
--- a/library/stdarch/crates/stdarch-gen/neon.spec
+++ b/library/stdarch/crates/stdarch-gen/neon.spec
@@ -2033,6 +2033,81 @@ aarch64 = sqadd
 link-aarch64 = sqadd._EXT_
 generate i32, i64
 
+/// Load multiple single-element structures to one, two, three, or four registers
+name = vld1
+out-suffix
+a = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
+validate 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
+test = load_test
+
+aarch64 = ld1
+link-aarch64 = ld1x2._EXT2_
+arm = vld1
+link-arm = vld1x2._EXT2_
+generate *const i8:int8x8x2_t, *const i16:int16x4x2_t, *const i32:int32x2x2_t, *const i64:int64x1x2_t
+generate *const i8:int8x16x2_t, *const i16:int16x8x2_t, *const i32:int32x4x2_t, *const i64:int64x2x2_t
+
+link-aarch64 = ld1x3._EXT2_
+link-arm = vld1x3._EXT2_
+generate *const i8:int8x8x3_t, *const i16:int16x4x3_t, *const i32:int32x2x3_t, *const i64:int64x1x3_t
+generate *const i8:int8x16x3_t, *const i16:int16x8x3_t, *const i32:int32x4x3_t, *const i64:int64x2x3_t
+
+link-aarch64 = ld1x4._EXT2_
+link-arm = vld1x4._EXT2_
+generate *const i8:int8x8x4_t, *const i16:int16x4x4_t, *const i32:int32x2x4_t, *const i64:int64x1x4_t
+generate *const i8:int8x16x4_t, *const i16:int16x8x4_t, *const i32:int32x4x4_t, *const i64:int64x2x4_t
+
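For reference, each generate line in the block above becomes one public intrinsic in the generated sources. A hedged sketch of what the `*const i8:int8x8x2_t` entry plausibly expands to on AArch64, following the extern "unadjusted"/link_name pattern used throughout this patch (the authoritative output is in core_arch/src/aarch64/neon/generated.rs):

    #[inline]
    #[target_feature(enable = "neon")]
    #[cfg_attr(test, assert_instr(ld1))]
    pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t {
        #[allow(improper_ctypes)]
        extern "unadjusted" {
            // ld1x2 plus the _EXT2_ string "v8i8.p0i8" from type_to_ext below
            #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8")]
            fn vld1_s8_x2_(a: *const i8) -> int8x8x2_t;
        }
        vld1_s8_x2_(a)
    }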
+/// Load multiple single-element structures to one, two, three, or four registers
+name = vld1
+out-suffix
+multi_fn = transmute, {vld1-outsigned-noext, transmute(a)}
+a = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
+validate 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
+
+test = load_test
+aarch64 = ld1
+arm = vld1
+generate *const u8:uint8x8x2_t, *const u16:uint16x4x2_t, *const u32:uint32x2x2_t, *const u64:uint64x1x2_t
+generate *const u8:uint8x16x2_t, *const u16:uint16x8x2_t, *const u32:uint32x4x2_t, *const u64:uint64x2x2_t
+generate *const u8:uint8x8x3_t, *const u16:uint16x4x3_t, *const u32:uint32x2x3_t, *const u64:uint64x1x3_t
+generate *const u8:uint8x16x3_t, *const u16:uint16x8x3_t, *const u32:uint32x4x3_t, *const u64:uint64x2x3_t
+generate *const u8:uint8x8x4_t, *const u16:uint16x4x4_t, *const u32:uint32x2x4_t, *const u64:uint64x1x4_t
+generate *const u8:uint8x16x4_t, *const u16:uint16x8x4_t, *const u32:uint32x4x4_t, *const u64:uint64x2x4_t
+generate *const p8:poly8x8x2_t, *const p8:poly8x8x3_t, *const p8:poly8x8x4_t
+generate *const p8:poly8x16x2_t, *const p8:poly8x16x3_t, *const p8:poly8x16x4_t
+generate *const p16:poly16x4x2_t, *const p16:poly16x4x3_t, *const p16:poly16x4x4_t
+generate *const p16:poly16x8x2_t, *const p16:poly16x8x3_t, *const p16:poly16x8x4_t
+
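This second block derives the unsigned and polynomial loads from the signed ones instead of linking to LLVM again: `multi_fn = transmute, {vld1-outsigned-noext, transmute(a)}` names the signed intrinsic after the signed version of the output type and reinterprets pointer and result. Roughly, for `*const u8:uint8x8x2_t` (a sketch, not the literal generated code):

    pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t {
        // Reinterpret the pointer, load through the signed intrinsic, then
        // reinterpret the loaded registers; no extra instructions result.
        transmute(vld1_s8_x2(transmute(a)))
    }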
"uint8x8x2_t" => "_u8_x2", + "uint8x8x3_t" => "_u8_x3", + "uint8x8x4_t" => "_u8_x4", + "uint16x4x2_t" => "_u16_x2", + "uint16x4x3_t" => "_u16_x3", + "uint16x4x4_t" => "_u16_x4", + "uint32x2x2_t" => "_u32_x2", + "uint32x2x3_t" => "_u32_x3", + "uint32x2x4_t" => "_u32_x4", + "uint64x1x2_t" => "_u64_x2", + "uint64x1x3_t" => "_u64_x3", + "uint64x1x4_t" => "_u64_x4", + "poly8x8x2_t" => "_p8_x2", + "poly8x8x3_t" => "_p8_x3", + "poly8x8x4_t" => "_p8_x4", + "poly16x4x2_t" => "_p16_x2", + "poly16x4x3_t" => "_p16_x3", + "poly16x4x4_t" => "_p16_x4", + "poly64x1x2_t" => "_p64_x2", + "poly64x1x3_t" => "_p64_x3", + "poly64x1x4_t" => "_p64_x4", + "float32x2x2_t" => "_f32_x2", + "float32x2x3_t" => "_f32_x3", + "float32x2x4_t" => "_f32_x4", + "float64x1x2_t" => "_f64_x2", + "float64x1x3_t" => "_f64_x3", + "float64x1x4_t" => "_f64_x4", + "int8x16x2_t" => "q_s8_x2", + "int8x16x3_t" => "q_s8_x3", + "int8x16x4_t" => "q_s8_x4", + "int16x8x2_t" => "q_s16_x2", + "int16x8x3_t" => "q_s16_x3", + "int16x8x4_t" => "q_s16_x4", + "int32x4x2_t" => "q_s32_x2", + "int32x4x3_t" => "q_s32_x3", + "int32x4x4_t" => "q_s32_x4", + "int64x2x2_t" => "q_s64_x2", + "int64x2x3_t" => "q_s64_x3", + "int64x2x4_t" => "q_s64_x4", + "uint8x16x2_t" => "q_u8_x2", + "uint8x16x3_t" => "q_u8_x3", + "uint8x16x4_t" => "q_u8_x4", + "uint16x8x2_t" => "q_u16_x2", + "uint16x8x3_t" => "q_u16_x3", + "uint16x8x4_t" => "q_u16_x4", + "uint32x4x2_t" => "q_u32_x2", + "uint32x4x3_t" => "q_u32_x3", + "uint32x4x4_t" => "q_u32_x4", + "uint64x2x2_t" => "q_u64_x2", + "uint64x2x3_t" => "q_u64_x3", + "uint64x2x4_t" => "q_u64_x4", + "poly8x16x2_t" => "q_p8_x2", + "poly8x16x3_t" => "q_p8_x3", + "poly8x16x4_t" => "q_p8_x4", + "poly16x8x2_t" => "q_p16_x2", + "poly16x8x3_t" => "q_p16_x3", + "poly16x8x4_t" => "q_p16_x4", + "poly64x2x2_t" => "q_p64_x2", + "poly64x2x3_t" => "q_p64_x3", + "poly64x2x4_t" => "q_p64_x4", + "float32x4x2_t" => "q_f32_x2", + "float32x4x3_t" => "q_f32_x3", + "float32x4x4_t" => "q_f32_x4", + "float64x2x2_t" => "q_f64_x2", + "float64x2x3_t" => "q_f64_x3", + "float64x2x4_t" => "q_f64_x4", "i8" => "b_s8", "i16" => "h_s16", "i32" => "s_s32", @@ -274,18 +348,10 @@ fn type_to_lane_suffixes<'a>(out_t: &'a str, in_t: &'a str) -> String { str } -fn type_to_signed(t: &str) -> &str { - match t { - "int8x8_t" | "uint8x8_t" | "poly8x8_t" => "int8x8_t", - "int8x16_t" | "uint8x16_t" | "poly8x16_t" => "int8x16_t", - "int16x4_t" | "uint16x4_t" | "poly16x4_t" => "int16x4_t", - "int16x8_t" | "uint16x8_t" | "poly16x8_t" => "int16x8_t", - "int32x2_t" | "uint32x2_t" => "int32x2_t", - "int32x4_t" | "uint32x4_t" => "int32x4_t", - "int64x1_t" | "uint64x1_t" | "poly64x1_t" => "int64x1_t", - "int64x2_t" | "uint64x2_t" | "poly64x2_t" => "int64x2_t", - _ => panic!("unknown type: {}", t), - } +fn type_to_signed(t: &String) -> String { + let s = t.replace("uint", "int"); + let s = s.replace("poly", "int"); + s } fn type_to_unsigned(t: &str) -> &str { @@ -384,34 +450,34 @@ enum TargetFeature { fn type_to_global_type(t: &str) -> &str { match t { - "int8x8_t" => "i8x8", - "int8x16_t" => "i8x16", - "int16x4_t" => "i16x4", - "int16x8_t" => "i16x8", - "int32x2_t" => "i32x2", - "int32x4_t" => "i32x4", - "int64x1_t" => "i64x1", - "int64x2_t" => "i64x2", - "uint8x8_t" => "u8x8", - "uint8x16_t" => "u8x16", - "uint16x4_t" => "u16x4", - "uint16x8_t" => "u16x8", - "uint32x2_t" => "u32x2", - "uint32x4_t" => "u32x4", - "uint64x1_t" => "u64x1", - "uint64x2_t" => "u64x2", + "int8x8_t" | "int8x8x2_t" | "int8x8x3_t" | "int8x8x4_t" => "i8x8", + "int8x16_t" | "int8x16x2_t" | "int8x16x3_t" | 
"int8x16x4_t" => "i8x16", + "int16x4_t" | "int16x4x2_t" | "int16x4x3_t" | "int16x4x4_t" => "i16x4", + "int16x8_t" | "int16x8x2_t" | "int16x8x3_t" | "int16x8x4_t" => "i16x8", + "int32x2_t" | "int32x2x2_t" | "int32x2x3_t" | "int32x2x4_t" => "i32x2", + "int32x4_t" | "int32x4x2_t" | "int32x4x3_t" | "int32x4x4_t" => "i32x4", + "int64x1_t" | "int64x1x2_t" | "int64x1x3_t" | "int64x1x4_t" => "i64x1", + "int64x2_t" | "int64x2x2_t" | "int64x2x3_t" | "int64x2x4_t" => "i64x2", + "uint8x8_t" | "uint8x8x2_t" | "uint8x8x3_t" | "uint8x8x4_t" => "u8x8", + "uint8x16_t" | "uint8x16x2_t" | "uint8x16x3_t" | "uint8x16x4_t" => "u8x16", + "uint16x4_t" | "uint16x4x2_t" | "uint16x4x3_t" | "uint16x4x4_t" => "u16x4", + "uint16x8_t" | "uint16x8x2_t" | "uint16x8x3_t" | "uint16x8x4_t" => "u16x8", + "uint32x2_t" | "uint32x2x2_t" | "uint32x2x3_t" | "uint32x2x4_t" => "u32x2", + "uint32x4_t" | "uint32x4x2_t" | "uint32x4x3_t" | "uint32x4x4_t" => "u32x4", + "uint64x1_t" | "uint64x1x2_t" | "uint64x1x3_t" | "uint64x1x4_t" => "u64x1", + "uint64x2_t" | "uint64x2x2_t" | "uint64x2x3_t" | "uint64x2x4_t" => "u64x2", "float16x4_t" => "f16x4", "float16x8_t" => "f16x8", - "float32x2_t" => "f32x2", - "float32x4_t" => "f32x4", - "float64x1_t" => "f64", - "float64x2_t" => "f64x2", - "poly8x8_t" => "i8x8", - "poly8x16_t" => "i8x16", - "poly16x4_t" => "i16x4", - "poly16x8_t" => "i16x8", - "poly64x1_t" => "i64x1", - "poly64x2_t" => "i64x2", + "float32x2_t" | "float32x2x2_t" | "float32x2x3_t" | "float32x2x4_t" => "f32x2", + "float32x4_t" | "float32x4x2_t" | "float32x4x3_t" | "float32x4x4_t" => "f32x4", + "float64x1_t" | "float64x1x2_t" | "float64x1x3_t" | "float64x1x4_t" => "f64", + "float64x2_t" | "float64x2x2_t" | "float64x2x3_t" | "float64x2x4_t" => "f64x2", + "poly8x8_t" | "poly8x8x2_t" | "poly8x8x3_t" | "poly8x8x4_t" => "i8x8", + "poly8x16_t" | "poly8x16x2_t" | "poly8x16x3_t" | "poly8x16x4_t" => "i8x16", + "poly16x4_t" | "poly16x4x2_t" | "poly16x4x3_t" | "poly16x4x4_t" => "i16x4", + "poly16x8_t" | "poly16x8x2_t" | "poly16x8x3_t" | "poly16x8x4_t" => "i16x8", + "poly64x1_t" | "poly64x1x2_t" | "poly64x1x3_t" | "poly64x1x4_t" => "i64x1", + "poly64x2_t" | "poly64x2x2_t" | "poly64x2x3_t" | "poly64x2x4_t" => "i64x2", "i8" => "i8", "i16" => "i16", "i32" => "i32", @@ -432,18 +498,33 @@ fn type_to_global_type(t: &str) -> &str { fn type_to_native_type(t: &str) -> &str { match t { - "int8x8_t" | "int8x16_t" | "i8" => "i8", - "int16x4_t" | "int16x8_t" | "i16" => "i16", - "int32x2_t" | "int32x4_t" | "i32" => "i32", - "int64x1_t" | "int64x2_t" | "i64" => "i64", - "uint8x8_t" | "uint8x16_t" | "u8" => "u8", - "uint16x4_t" | "uint16x8_t" | "u16" => "u16", - "uint32x2_t" | "uint32x4_t" | "u32" => "u32", - "uint64x1_t" | "uint64x2_t" | "u64" => "u64", + "int8x8_t" | "int8x16_t" | "i8" | "int8x8x2_t" | "int8x8x3_t" | "int8x8x4_t" + | "int8x16x2_t" | "int8x16x3_t" | "int8x16x4_t" => "i8", + "int16x4_t" | "int16x8_t" | "i16" | "int16x4x2_t" | "int16x4x3_t" | "int16x4x4_t" + | "int16x8x2_t" | "int16x8x3_t" | "int16x8x4_t" => "i16", + "int32x2_t" | "int32x4_t" | "i32" | "int32x2x2_t" | "int32x2x3_t" | "int32x2x4_t" + | "int32x4x2_t" | "int32x4x3_t" | "int32x4x4_t" => "i32", + "int64x1_t" | "int64x2_t" | "i64" | "int64x1x2_t" | "int64x1x3_t" | "int64x1x4_t" + | "int64x2x2_t" | "int64x2x3_t" | "int64x2x4_t" => "i64", + "uint8x8_t" | "uint8x16_t" | "u8" | "uint8x8x2_t" | "uint8x8x3_t" | "uint8x8x4_t" + | "uint8x16x2_t" | "uint8x16x3_t" | "uint8x16x4_t" => "u8", + "uint16x4_t" | "uint16x8_t" | "u16" | "uint16x4x2_t" | "uint16x4x3_t" | "uint16x4x4_t" + | 
"uint16x8x2_t" | "uint16x8x3_t" | "uint16x8x4_t" => "u16", + "uint32x2_t" | "uint32x4_t" | "u32" | "uint32x2x2_t" | "uint32x2x3_t" | "uint32x2x4_t" + | "uint32x4x2_t" | "uint32x4x3_t" | "uint32x4x4_t" => "u32", + "uint64x1_t" | "uint64x2_t" | "u64" | "uint64x1x2_t" | "uint64x1x3_t" | "uint64x1x4_t" + | "uint64x2x2_t" | "uint64x2x3_t" | "uint64x2x4_t" => "u64", "float16x4_t" | "float16x8_t" => "f16", - "float32x2_t" | "float32x4_t" => "f32", - "float64x1_t" | "float64x2_t" => "f64", - "poly64x1_t" | "poly64x2_t" => "u64", + "float32x2_t" | "float32x4_t" | "float32x2x2_t" | "float32x2x3_t" | "float32x2x4_t" + | "float32x4x2_t" | "float32x4x3_t" | "float32x4x4_t" => "f32", + "float64x1_t" | "float64x2_t" | "float64x1x2_t" | "float64x1x3_t" | "float64x1x4_t" + | "float64x2x2_t" | "float64x2x3_t" | "float64x2x4_t" => "f64", + "poly8x8_t" | "poly8x16_t" | "poly8x8x2_t" | "poly8x8x3_t" | "poly8x8x4_t" + | "poly8x16x2_t" | "poly8x16x3_t" | "poly8x16x4_t" => "u8", + "poly16x4_t" | "poly16x8_t" | "poly16x4x2_t" | "poly16x4x3_t" | "poly16x4x4_t" + | "poly16x8x2_t" | "poly16x8x3_t" | "poly16x8x4_t" => "u16", + "poly64x1_t" | "poly64x2_t" | "poly64x1x2_t" | "poly64x1x3_t" | "poly64x1x4_t" + | "poly64x2x2_t" | "poly64x2x3_t" | "poly64x2x4_t" => "u64", _ => panic!("unknown type: {}", t), } } @@ -510,6 +591,26 @@ fn type_to_ext(t: &str) -> &str { "poly8x16_t" => "v16i8", "poly16x4_t" => "v4i16", "poly16x8_t" => "v8i16", + "int8x8x2_t" | "int8x8x3_t" | "int8x8x4_t" => "v8i8.p0i8", + "int16x4x2_t" | "int16x4x3_t" | "int16x4x4_t" => "v4i16.p0i16", + "int32x2x2_t" | "int32x2x3_t" | "int32x2x4_t" => "v2i32.p0i32", + "int64x1x2_t" | "int64x1x3_t" | "int64x1x4_t" => "v1i64.p0i64", + "uint8x8x2_t" | "uint8x8x3_t" | "uint8x8x4_t" => "v8i8.p0i8", + "uint16x4x2_t" | "uint16x4x3_t" | "uint16x4x4_t" => "v4i16.p0i16", + "uint32x2x2_t" | "uint32x2x3_t" | "uint32x2x4_t" => "v2i32.p0i32", + "uint64x1x2_t" | "uint64x1x3_t" | "uint64x1x4_t" => "v1i64.p0i64", + "float32x2x2_t" | "float32x2x3_t" | "float32x2x4_t" => "v2f32.p0f32", + "float64x1x2_t" | "float64x1x3_t" | "float64x1x4_t" => "v1f64.p0f64", + "int8x16x2_t" | "int8x16x3_t" | "int8x16x4_t" => "v16i8.p0i8", + "int16x8x2_t" | "int16x8x3_t" | "int16x8x4_t" => "v8i16.p0i16", + "int32x4x2_t" | "int32x4x3_t" | "int32x4x4_t" => "v4i32.p0i32", + "int64x2x2_t" | "int64x2x3_t" | "int64x2x4_t" => "v2i64.p0i64", + "uint8x16x2_t" | "uint8x16x3_t" | "uint8x16x4_t" => "v16i8.p0i8", + "uint16x8x2_t" | "uint16x8x3_t" | "uint16x8x4_t" => "v8i16.p0i16", + "uint32x4x2_t" | "uint32x4x3_t" | "uint32x4x4_t" => "v4i32.p0i32", + "uint64x2x2_t" | "uint64x2x3_t" | "uint64x2x4_t" => "v2i64.p0i64", + "float32x4x2_t" | "float32x4x3_t" | "float32x4x4_t" => "v4f32.p0f32", + "float64x2x2_t" | "float64x2x3_t" | "float64x2x4_t" => "v2f64.p0f64", "i8" => "i8", "i16" => "i16", "i32" => "i32", @@ -522,6 +623,16 @@ fn type_to_ext(t: &str) -> &str { "f64" => "f64", "p64" => "p64", "p128" => "p128", + "*const i8" => "i8", + "*const i16" => "i16", + "*const i32" => "i32", + "*const i64" => "i64", + "*const u8" => "i8", + "*const u16" => "i16", + "*const u32" => "i32", + "*const u64" => "i64", + "*const f32" => "f32", + "*const f64" => "f64", /* "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", @@ -858,9 +969,8 @@ fn gen_aarch64( target: TargetFeature, fixed: &Vec, multi_fn: &Vec, + test_fn: &str, ) -> (String, String) { - let _global_t = type_to_global_type(in_t[0]); - let _global_ret_t = type_to_global_type(out_t); let name = match suffix { Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), 
@@ -510,6 +591,26 @@ fn type_to_ext(t: &str) -> &str {
         "poly8x16_t" => "v16i8",
         "poly16x4_t" => "v4i16",
         "poly16x8_t" => "v8i16",
+        "int8x8x2_t" | "int8x8x3_t" | "int8x8x4_t" => "v8i8.p0i8",
+        "int16x4x2_t" | "int16x4x3_t" | "int16x4x4_t" => "v4i16.p0i16",
+        "int32x2x2_t" | "int32x2x3_t" | "int32x2x4_t" => "v2i32.p0i32",
+        "int64x1x2_t" | "int64x1x3_t" | "int64x1x4_t" => "v1i64.p0i64",
+        "uint8x8x2_t" | "uint8x8x3_t" | "uint8x8x4_t" => "v8i8.p0i8",
+        "uint16x4x2_t" | "uint16x4x3_t" | "uint16x4x4_t" => "v4i16.p0i16",
+        "uint32x2x2_t" | "uint32x2x3_t" | "uint32x2x4_t" => "v2i32.p0i32",
+        "uint64x1x2_t" | "uint64x1x3_t" | "uint64x1x4_t" => "v1i64.p0i64",
+        "float32x2x2_t" | "float32x2x3_t" | "float32x2x4_t" => "v2f32.p0f32",
+        "float64x1x2_t" | "float64x1x3_t" | "float64x1x4_t" => "v1f64.p0f64",
+        "int8x16x2_t" | "int8x16x3_t" | "int8x16x4_t" => "v16i8.p0i8",
+        "int16x8x2_t" | "int16x8x3_t" | "int16x8x4_t" => "v8i16.p0i16",
+        "int32x4x2_t" | "int32x4x3_t" | "int32x4x4_t" => "v4i32.p0i32",
+        "int64x2x2_t" | "int64x2x3_t" | "int64x2x4_t" => "v2i64.p0i64",
+        "uint8x16x2_t" | "uint8x16x3_t" | "uint8x16x4_t" => "v16i8.p0i8",
+        "uint16x8x2_t" | "uint16x8x3_t" | "uint16x8x4_t" => "v8i16.p0i16",
+        "uint32x4x2_t" | "uint32x4x3_t" | "uint32x4x4_t" => "v4i32.p0i32",
+        "uint64x2x2_t" | "uint64x2x3_t" | "uint64x2x4_t" => "v2i64.p0i64",
+        "float32x4x2_t" | "float32x4x3_t" | "float32x4x4_t" => "v4f32.p0f32",
+        "float64x2x2_t" | "float64x2x3_t" | "float64x2x4_t" => "v2f64.p0f64",
         "i8" => "i8",
         "i16" => "i16",
         "i32" => "i32",
@@ -522,6 +623,16 @@ fn type_to_ext(t: &str) -> &str {
         "f64" => "f64",
         "p64" => "p64",
         "p128" => "p128",
+        "*const i8" => "i8",
+        "*const i16" => "i16",
+        "*const i32" => "i32",
+        "*const i64" => "i64",
+        "*const u8" => "i8",
+        "*const u16" => "i16",
+        "*const u32" => "i32",
+        "*const u64" => "i64",
+        "*const f32" => "f32",
+        "*const f64" => "f64",
         /*
         "poly64x1_t" => "i64x1",
         "poly64x2_t" => "i64x2",
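The new ext strings encode both the vector shape and the pointee type, which is what the LLVM load intrinsics are parameterized over. A hedged sketch of the declaration that `link-aarch64 = ld1x3._EXT2_` should resolve to for int16x4x3_t, where _EXT2_ is the "v4i16.p0i16" entry above:

    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16")]
        fn vld1_s16_x3_(a: *const i16) -> int16x4x3_t;
    }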
@@ -858,9 +969,8 @@ fn gen_aarch64(
     target: TargetFeature,
     fixed: &Vec<String>,
     multi_fn: &Vec<String>,
+    test_fn: &str,
 ) -> (String, String) {
-    let _global_t = type_to_global_type(in_t[0]);
-    let _global_ret_t = type_to_global_type(out_t);
     let name = match suffix {
         Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])),
         NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])),
@@ -941,7 +1051,7 @@ fn gen_aarch64(
     };
     ext_c = format!(
         r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "aarch64", link_name = "{}")]
         fn {}({}) -> {};
     }}
@@ -965,7 +1075,7 @@ fn gen_aarch64(
     if const_aarch64.is_some() {
         ext_c_const = format!(
             r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "aarch64", link_name = "{}")]
         fn {}({}) -> {};
     }}
@@ -1162,17 +1272,94 @@ fn gen_aarch64(
         current_comment, current_target, current_aarch64, const_assert, const_legacy, call
     );
 
-    let test = gen_test(
-        &name,
-        in_t,
-        &out_t,
-        current_tests,
-        [type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])],
-        type_len(out_t),
-        para_num,
-    );
+    let test = if test_fn == "load_test" {
+        gen_load_test(&name, in_t, &out_t, current_tests, type_len(out_t))
+    } else {
+        gen_test(
+            &name,
+            in_t,
+            &out_t,
+            current_tests,
+            [type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])],
+            type_len(out_t),
+            para_num,
+        )
+    };
     (function, test)
 }
+
+fn gen_load_test(
+    name: &str,
+    _in_t: &[&str; 3],
+    out_t: &str,
+    current_tests: &[(
+        Vec<String>,
+        Vec<String>,
+        Vec<String>,
+        Option<String>,
+        Vec<String>,
+    )],
+    len_out: usize,
+) -> String {
+    let mut test = format!(
+        r#"
+    #[simd_test(enable = "neon")]
+    unsafe fn test_{}() {{"#,
+        name,
+    );
+    for (a, _, _, _, e) in current_tests {
+        let a: Vec<String> = a.iter().take(len_out + 1).cloned().collect();
+        let e: Vec<String> = e.iter().take(len_out).cloned().collect();
+        let mut input = String::from("[");
+        for i in 0..type_len(out_t) + 1 {
+            if i != 0 {
+                input.push_str(", ");
+            }
+            input.push_str(&a[i])
+        }
+        input.push_str("]");
+        let mut output = String::from("[");
+        for i in 0..type_sub_len(out_t) {
+            if i != 0 {
+                output.push_str(", ");
+            }
+            let sub_len = type_len(out_t) / type_sub_len(out_t);
+            if type_to_global_type(out_t) != "f64" {
+                let mut sub_output = format!("{}::new(", type_to_global_type(out_t));
+                for j in 0..sub_len {
+                    if j != 0 {
+                        sub_output.push_str(", ");
+                    }
+                    sub_output.push_str(&e[i * sub_len + j]);
+                }
+                sub_output.push_str(")");
+                output.push_str(&sub_output);
+            } else {
+                output.push_str(&e[i]);
+            }
+        }
+        output.push_str("]");
+        let t = format!(
+            r#"
+        let a: [{}; {}] = {};
+        let e: [{}; {}] = {};
+        let r: [{}; {}] = transmute({}(a[1..].as_ptr()));
+        assert_eq!(r, e);
+"#,
+            type_to_native_type(out_t),
+            type_len(out_t) + 1,
+            input,
+            type_to_global_type(out_t),
+            type_sub_len(out_t),
+            output,
+            type_to_global_type(out_t),
+            type_sub_len(out_t),
+            name,
+        );
+        test.push_str(&t);
+    }
+    test.push_str("    }\n");
+    test
+}
 
 fn gen_test(
     name: &str,
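A hedged reconstruction of what gen_load_test emits, taking vld1_s32_x2 as the example (out_t = int32x2x2_t, so type_len = 4, type_sub_len = 2, native type i32; i32x2 is stdarch's internal test vector type). The extra leading element in `a` means `a[1..].as_ptr()` presumably exercises a pointer that is not vector-aligned:

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_s32_x2() {
        let a: [i32; 5] = [0, 1, 2, 3, 4];
        let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(3, 4)];
        let r: [i32x2; 2] = transmute(vld1_s32_x2(a[1..].as_ptr()));
        assert_eq!(r, e);
    }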
@@ -1305,9 +1492,8 @@ fn gen_arm(
     target: TargetFeature,
     fixed: &Vec<String>,
     multi_fn: &Vec<String>,
+    test_fn: &str,
 ) -> (String, String) {
-    let _global_t = type_to_global_type(in_t[0]);
-    let _global_ret_t = type_to_global_type(out_t);
     let name = match suffix {
         Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])),
         NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])),
@@ -1440,7 +1626,7 @@ fn gen_arm(
     if out_t == link_arm_t[3] && out_t == link_aarch64_t[3] {
         ext_c = format!(
             r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "arm", link_name = "{}")]
         #[cfg_attr(target_arch = "aarch64", link_name = "{}")]
         fn {}({}) -> {};
@@ -1476,7 +1662,7 @@ fn gen_arm(
         };
         ext_c_arm.push_str(&format!(
             r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "arm", link_name = "{}")]
         fn {}({}) -> {};
     }}
@@ -1504,7 +1690,7 @@ fn gen_arm(
     if out_t != link_arm_t[3] {
         ext_c_arm.push_str(&format!(
             r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "arm", link_name = "{}")]
         fn {}({}) -> {};
     }}
@@ -1532,7 +1718,7 @@ fn gen_arm(
     if const_aarch64.is_some() {
         ext_c_aarch64.push_str(&format!(
             r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "aarch64", link_name = "{}")]
         fn {}({}) -> {};
     }}
@@ -1557,7 +1743,7 @@ fn gen_arm(
     if out_t != link_aarch64_t[3] {
         ext_c_aarch64.push_str(&format!(
             r#"#[allow(improper_ctypes)]
-    extern "C" {{
+    extern "unadjusted" {{
         #[cfg_attr(target_arch = "aarch64", link_name = "{}")]
         fn {}({}) -> {};
     }}
@@ -1880,15 +2066,19 @@ fn gen_arm(
             call,
         )
     };
-    let test = gen_test(
-        &name,
-        in_t,
-        &out_t,
-        current_tests,
-        [type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])],
-        type_len(out_t),
-        para_num,
-    );
+    let test = if test_fn == "load_test" {
+        gen_load_test(&name, in_t, &out_t, current_tests, type_len(out_t))
+    } else {
+        gen_test(
+            &name,
+            in_t,
+            &out_t,
+            current_tests,
+            [type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])],
+            type_len(out_t),
+            para_num,
+        )
+    };
     (function, test)
 }
@@ -2305,7 +2495,9 @@ fn get_call(
     } else if fn_format[1] == "in2lane" {
         fn_name.push_str(&type_to_lane_suffixes(out_t, in_t[2]));
     } else if fn_format[1] == "signed" {
-        fn_name.push_str(type_to_suffix(type_to_signed(in_t[1])));
+        fn_name.push_str(type_to_suffix(&type_to_signed(&String::from(in_t[1]))));
+    } else if fn_format[1] == "outsigned" {
+        fn_name.push_str(type_to_suffix(&type_to_signed(&String::from(out_t))));
     } else if fn_format[1] == "unsigned" {
         fn_name.push_str(type_to_suffix(type_to_unsigned(in_t[1])));
     } else if fn_format[1] == "doubleself" {
@@ -2315,7 +2507,7 @@
     } else if fn_format[1] == "noqself" {
         fn_name.push_str(type_to_noq_suffix(in_t[1]));
     } else if fn_format[1] == "noqsigned" {
-        fn_name.push_str(type_to_noq_suffix(type_to_signed(in_t[1])));
+        fn_name.push_str(type_to_noq_suffix(&type_to_signed(&String::from(in_t[1]))));
     } else if fn_format[1] == "nosuffix" {
     } else if fn_format[1] == "in_len" {
         fn_name.push_str(&type_len(in_t[1]).to_string());
@@ -2330,7 +2522,7 @@
     } else if fn_format[1] == "nin0" {
        fn_name.push_str(type_to_n_suffix(in_t[0]));
     } else if fn_format[1] == "nsigned" {
-        fn_name.push_str(type_to_n_suffix(type_to_signed(in_t[1])));
+        fn_name.push_str(type_to_n_suffix(&type_to_signed(&String::from(in_t[1]))));
     } else if fn_format[1] == "in_ntt" {
         fn_name.push_str(type_to_suffix(native_type_to_type(in_t[1])));
     } else if fn_format[1] == "out_ntt" {
@@ -2410,6 +2602,7 @@ fn main() -> io::Result<()> {
     )> = Vec::new();
     let mut multi_fn: Vec<String> = Vec::new();
     let mut target: TargetFeature = Default;
+    let mut test_fn = "normal";
 
     //
     // THIS FILE IS GENERATED FORM neon.spec DO NOT CHANGE IT MANUALLY
@@ -2491,6 +2684,7 @@ mod test {
             n = None;
             multi_fn = Vec::new();
             target = Default;
+            test_fn = "normal";
         } else if line.starts_with("//") {
         } else if line.starts_with("name = ") {
            current_name = Some(String::from(&line[7..]));
@@ -2547,6 +2741,14 @@ mod test {
             link_arm = Some(String::from(&line[11..]));
         } else if line.starts_with("const-arm = ") {
             const_arm = Some(String::from(&line[12..]));
+        } else if line.starts_with("test = ") {
+            test_fn = if line.contains("load_test") {
+                "load_test"
+            } else if line.contains("store_test") {
+                "store_test"
+            } else {
+                "normal"
+            }
         } else if line.starts_with("target = ") {
             target = match Some(String::from(&line[9..])) {
                 Some(input) => match input.as_str() {
@@ -2618,6 +2820,7 @@ mod test {
                 target,
                 &fixed,
                 &multi_fn,
+                test_fn,
             );
             out_arm.push_str(&function);
             tests_arm.push_str(&test);
@@ -2638,6 +2841,7 @@ mod test {
                 target,
                 &fixed,
                 &multi_fn,
+                test_fn,
             );
             out_aarch64.push_str(&function);
             tests_aarch64.push_str(&test);
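The "outsigned" branch added to get_call above is what lets the unsigned spec block delegate by output type rather than input type. For example (assuming the stdarch-gen helpers above are in scope):

    let fn_name = format!(
        "vld1{}",
        type_to_suffix(&type_to_signed(&String::from("uint16x4x2_t")))
    );
    assert_eq!(fn_name, "vld1_s16_x2");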
diff --git a/library/stdarch/crates/stdarch-verify/src/lib.rs b/library/stdarch/crates/stdarch-verify/src/lib.rs
index e85f0489a887..5836949122d8 100644
--- a/library/stdarch/crates/stdarch-verify/src/lib.rs
+++ b/library/stdarch/crates/stdarch-verify/src/lib.rs
@@ -218,11 +218,29 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream {
         "int8x16_t" => quote! { &I8X16 },
         "int16x2_t" => quote! { &I16X2 },
         "int16x4_t" => quote! { &I16X4 },
+        "int16x4x2_t" => quote! { &I16X4X2 },
+        "int16x4x3_t" => quote! { &I16X4X3 },
+        "int16x4x4_t" => quote! { &I16X4X4 },
         "int16x8_t" => quote! { &I16X8 },
+        "int16x8x2_t" => quote! { &I16X8X2 },
+        "int16x8x3_t" => quote! { &I16X8X3 },
+        "int16x8x4_t" => quote! { &I16X8X4 },
         "int32x2_t" => quote! { &I32X2 },
+        "int32x2x2_t" => quote! { &I32X2X2 },
+        "int32x2x3_t" => quote! { &I32X2X3 },
+        "int32x2x4_t" => quote! { &I32X2X4 },
         "int32x4_t" => quote! { &I32X4 },
+        "int32x4x2_t" => quote! { &I32X4X2 },
+        "int32x4x3_t" => quote! { &I32X4X3 },
+        "int32x4x4_t" => quote! { &I32X4X4 },
         "int64x1_t" => quote! { &I64X1 },
+        "int64x1x2_t" => quote! { &I64X1X2 },
+        "int64x1x3_t" => quote! { &I64X1X3 },
+        "int64x1x4_t" => quote! { &I64X1X4 },
         "int64x2_t" => quote! { &I64X2 },
+        "int64x2x2_t" => quote! { &I64X2X2 },
+        "int64x2x3_t" => quote! { &I64X2X3 },
+        "int64x2x4_t" => quote! { &I64X2X4 },
         "uint8x8_t" => quote! { &U8X8 },
         "uint8x4_t" => quote! { &U8X4 },
         "uint8x8x2_t" => quote! { &U8X8X2 },
@@ -233,15 +251,45 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream {
         "uint8x8x4_t" => quote! { &U8X8X4 },
         "uint8x16_t" => quote! { &U8X16 },
         "uint16x4_t" => quote! { &U16X4 },
+        "uint16x4x2_t" => quote! { &U16X4X2 },
+        "uint16x4x3_t" => quote! { &U16X4X3 },
+        "uint16x4x4_t" => quote! { &U16X4X4 },
         "uint16x8_t" => quote! { &U16X8 },
+        "uint16x8x2_t" => quote! { &U16X8X2 },
+        "uint16x8x3_t" => quote! { &U16X8X3 },
+        "uint16x8x4_t" => quote! { &U16X8X4 },
         "uint32x2_t" => quote! { &U32X2 },
+        "uint32x2x2_t" => quote! { &U32X2X2 },
+        "uint32x2x3_t" => quote! { &U32X2X3 },
+        "uint32x2x4_t" => quote! { &U32X2X4 },
         "uint32x4_t" => quote! { &U32X4 },
+        "uint32x4x2_t" => quote! { &U32X4X2 },
+        "uint32x4x3_t" => quote! { &U32X4X3 },
+        "uint32x4x4_t" => quote! { &U32X4X4 },
         "uint64x1_t" => quote! { &U64X1 },
+        "uint64x1x2_t" => quote! { &U64X1X2 },
+        "uint64x1x3_t" => quote! { &U64X1X3 },
+        "uint64x1x4_t" => quote! { &U64X1X4 },
         "uint64x2_t" => quote! { &U64X2 },
+        "uint64x2x2_t" => quote! { &U64X2X2 },
+        "uint64x2x3_t" => quote! { &U64X2X3 },
+        "uint64x2x4_t" => quote! { &U64X2X4 },
         "float32x2_t" => quote! { &F32X2 },
+        "float32x2x2_t" => quote! { &F32X2X2 },
+        "float32x2x3_t" => quote! { &F32X2X3 },
+        "float32x2x4_t" => quote! { &F32X2X4 },
         "float32x4_t" => quote! { &F32X4 },
+        "float32x4x2_t" => quote! { &F32X4X2 },
+        "float32x4x3_t" => quote! { &F32X4X3 },
+        "float32x4x4_t" => quote! { &F32X4X4 },
         "float64x1_t" => quote! { &F64X1 },
+        "float64x1x2_t" => quote! { &F64X1X2 },
+        "float64x1x3_t" => quote! { &F64X1X3 },
+        "float64x1x4_t" => quote! { &F64X1X4 },
         "float64x2_t" => quote! { &F64X2 },
+        "float64x2x2_t" => quote! { &F64X2X2 },
+        "float64x2x3_t" => quote! { &F64X2X3 },
+        "float64x2x4_t" => quote! { &F64X2X4 },
         "poly8x8_t" => quote! { &POLY8X8 },
         "poly8x8x2_t" => quote! { &POLY8X8X2 },
         "poly8x8x3_t" => quote! { &POLY8X8X3 },
@@ -254,7 +302,13 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream {
         "poly64x2_t" => quote! { &POLY64X2 },
         "poly8x16_t" => quote! { &POLY8X16 },
         "poly16x4_t" => quote! { &POLY16X4 },
+        "poly16x4x2_t" => quote! { &POLY16X4X2 },
+        "poly16x4x3_t" => quote! { &POLY16X4X3 },
+        "poly16x4x4_t" => quote! { &POLY16X4X4 },
         "poly16x8_t" => quote! { &POLY16X8 },
+        "poly16x8x2_t" => quote! { &POLY16X8X2 },
+        "poly16x8x3_t" => quote! { &POLY16X8X3 },
+        "poly16x8x4_t" => quote! { &POLY16X8X4 },
         "p128" => quote! { &P128 },
 
         "v16i8" => quote! { &v16i8 },