add neon instruction vmov_n_* and vmovq_n_* (#1085)
This commit is contained in:
parent
4773f9b1d2
commit
7947cb8bac
3 changed files with 592 additions and 11 deletions
|
|
@ -1507,6 +1507,70 @@ pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t {
|
|||
simd_shuffle2(low, high, [0, 1])
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into the single 64-bit lane of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmov))]
pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t {
    // poly64x1_t shares its bit layout with u64x1: build the integer vector
    // from the scalar, then reinterpret the bits.
    transmute(u64x1::new(value))
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into the single lane of a `float64x1_t`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t {
    // One-lane vector: construct it directly from the scalar.
    float64x1_t(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into both 64-bit lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t {
    // poly64x2_t shares its bit layout with u64x2: fill both lanes with the
    // scalar, then reinterpret the bits.
    transmute(u64x2::new(value, value))
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into both `f64` lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t {
    // Two-lane vector: construct it directly from two copies of the scalar.
    float64x2_t(value, value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_p64`]; both broadcast the scalar into the
/// single 64-bit lane.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmov))]
pub unsafe fn vmov_n_p64(value: p64) -> poly64x1_t {
    vdup_n_p64(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_f64`]; both broadcast the scalar into the
/// single `f64` lane.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
pub unsafe fn vmov_n_f64(value: f64) -> float64x1_t {
    vdup_n_f64(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_p64`]; both broadcast the scalar into both
/// 64-bit lanes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
pub unsafe fn vmovq_n_p64(value: p64) -> poly64x2_t {
    vdupq_n_p64(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_f64`]; both broadcast the scalar into both
/// `f64` lanes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t {
    vdupq_n_f64(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -3443,6 +3507,70 @@ mod tests {
|
|||
test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64]));
|
||||
test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64]));
|
||||
|
||||
#[simd_test(enable = "neon")]
unsafe fn test_vdup_n_f64() {
    // vdup_n_f64 must broadcast the scalar into the single f64 lane.
    let a: f64 = 3.3;
    let e = f64x1::new(3.3);
    let r: f64x1 = transmute(vdup_n_f64(transmute(a)));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vdup_n_p64() {
    // p64 values are passed in and compared through their u64 bit patterns.
    let a: u64 = 3;
    let e = u64x1::new(3);
    let r: u64x1 = transmute(vdup_n_p64(transmute(a)));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_f64() {
    // Quad-register variant: both f64 lanes must hold the scalar.
    let a: f64 = 3.3;
    let e = f64x2::new(3.3, 3.3);
    let r: f64x2 = transmute(vdupq_n_f64(transmute(a)));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_p64() {
    // Quad-register variant: both 64-bit lanes must hold the scalar.
    let a: u64 = 3;
    let e = u64x2::new(3, 3);
    let r: u64x2 = transmute(vdupq_n_p64(transmute(a)));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_p64() {
    // vmov_n_p64 delegates to vdup_n_p64; expect the scalar in the one lane.
    let a: u64 = 3;
    let e = u64x1::new(3);
    let r: u64x1 = transmute(vmov_n_p64(transmute(a)));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_f64() {
    // vmov_n_f64 delegates to vdup_n_f64; expect the scalar in the one lane.
    let a: f64 = 3.3;
    let e = f64x1::new(3.3);
    let r: f64x1 = transmute(vmov_n_f64(transmute(a)));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_p64() {
    // vmovq_n_p64 delegates to vdupq_n_p64; expect the scalar in both lanes.
    let a: u64 = 3;
    let e = u64x2::new(3, 3);
    let r: u64x2 = transmute(vmovq_n_p64(transmute(a)));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_f64() {
    // vmovq_n_f64 delegates to vdupq_n_f64; expect the scalar in both lanes.
    let a: f64 = 3.3;
    let e = f64x2::new(3.3, 3.3);
    let r: f64x2 = transmute(vmovq_n_f64(transmute(a)));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vget_high_f64() {
|
||||
let a = f64x2::new(1.0, 2.0);
|
||||
|
|
|
|||
|
|
@ -3995,6 +3995,16 @@ pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t {
|
|||
int32x4_t(value, value, value, value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into both `i64` lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t {
    int64x2_t(value, value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -4028,6 +4038,16 @@ pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t {
|
|||
uint32x4_t(value, value, value, value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into both `u64` lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t {
    uint64x2_t(value, value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -4061,11 +4081,6 @@ pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t {
|
|||
float32x4_t(value, value, value, value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar.
|
||||
/// This instruction duplicates the vector element at the specified element index
|
||||
/// in the source SIMD&FP register into a scalar or each element in a vector,
|
||||
/// and writes the result to the destination SIMD&FP register.
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -4096,6 +4111,16 @@ pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t {
|
|||
int32x2_t(value, value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into the single `i64` lane of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t {
    int64x1_t(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -4126,6 +4151,16 @@ pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t {
|
|||
uint32x2_t(value, value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// Broadcasts the scalar `value` into the single `u64` lane of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t {
    uint64x1_t(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -4156,6 +4191,156 @@ pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t {
|
|||
float32x2_t(value, value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t {
    vdup_n_s8(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_s16`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t {
    vdup_n_s16(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_s32`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t {
    vdup_n_s32(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_s64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t {
    vdup_n_s64(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_u8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t {
    vdup_n_u8(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_u16`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t {
    vdup_n_u16(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_u32`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t {
    vdup_n_u32(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_u64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmov))]
pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t {
    vdup_n_u64(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_p8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t {
    vdup_n_p8(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_p16`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t {
    vdup_n_p16(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdup_n_f32`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t {
    vdup_n_f32(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t {
    vdupq_n_s8(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_s16`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t {
    vdupq_n_s16(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_s32`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t {
    vdupq_n_s32(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_s64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t {
    vdupq_n_s64(value)
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -4166,6 +4351,66 @@ pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t {
|
|||
vdupq_n_u8(value)
|
||||
}
|
||||
|
||||
/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_u16`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t {
    vdupq_n_u16(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_u32`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t {
    vdupq_n_u32(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_u64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t {
    vdupq_n_u64(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_p8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t {
    vdupq_n_p8(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_p16`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t {
    vdupq_n_p16(value)
}

/// Duplicate vector element to vector or scalar
///
/// This is an alias of [`vdupq_n_f32`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup))]
pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
    vdupq_n_f32(value)
}
|
||||
|
||||
/// Vector reinterpret cast operation
|
||||
#[inline]
|
||||
#[target_feature(enable = "neon")]
|
||||
|
|
@ -6119,11 +6364,19 @@ mod tests {
|
|||
assert_eq!(r, e);
|
||||
}
|
||||
|
||||
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_s64() {
    // Both i64 lanes must receive the broadcast scalar.
    let v: i64 = 64;
    let e = i64x2::new(64, 64);
    let r: i64x2 = transmute(vdupq_n_s64(v));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vdupq_n_u8() {
|
||||
let v: u8 = 42;
|
||||
let v: u8 = 64;
|
||||
let e = u8x16::new(
|
||||
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
|
||||
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
|
||||
);
|
||||
let r: u8x16 = transmute(vdupq_n_u8(v));
|
||||
assert_eq!(r, e);
|
||||
|
|
@ -6145,6 +6398,14 @@ mod tests {
|
|||
assert_eq!(r, e);
|
||||
}
|
||||
|
||||
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_n_u64() {
    // Both u64 lanes must receive the broadcast scalar.
    let v: u64 = 64;
    let e = u64x2::new(64, 64);
    let r: u64x2 = transmute(vdupq_n_u64(v));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vdupq_n_p8() {
|
||||
let v: p8 = 64;
|
||||
|
|
@ -6195,10 +6456,18 @@ mod tests {
|
|||
assert_eq!(r, e);
|
||||
}
|
||||
|
||||
#[simd_test(enable = "neon")]
unsafe fn test_vdup_n_s64() {
    // The single i64 lane must receive the broadcast scalar.
    let v: i64 = 64;
    let e = i64x1::new(64);
    let r: i64x1 = transmute(vdup_n_s64(v));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vdup_n_u8() {
|
||||
let v: u8 = 42;
|
||||
let e = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
|
||||
let v: u8 = 64;
|
||||
let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64);
|
||||
let r: u8x8 = transmute(vdup_n_u8(v));
|
||||
assert_eq!(r, e);
|
||||
}
|
||||
|
|
@ -6219,6 +6488,14 @@ mod tests {
|
|||
assert_eq!(r, e);
|
||||
}
|
||||
|
||||
#[simd_test(enable = "neon")]
unsafe fn test_vdup_n_u64() {
    // The single u64 lane must receive the broadcast scalar.
    let v: u64 = 64;
    let e = u64x1::new(64);
    let r: u64x1 = transmute(vdup_n_u64(v));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vdup_n_p8() {
|
||||
let v: p8 = 64;
|
||||
|
|
@ -6243,16 +6520,188 @@ mod tests {
|
|||
assert_eq!(r, e);
|
||||
}
|
||||
|
||||
// Each test below checks that a vmov_n_* alias broadcasts the scalar into
// every lane, comparing the result through a transmute to a comparable
// integer/float SIMD type.

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_s8() {
    let v: i8 = 64;
    let e = i8x8::new(64, 64, 64, 64, 64, 64, 64, 64);
    let r: i8x8 = transmute(vmov_n_s8(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_s16() {
    let v: i16 = 64;
    let e = i16x4::new(64, 64, 64, 64);
    let r: i16x4 = transmute(vmov_n_s16(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_s32() {
    let v: i32 = 64;
    let e = i32x2::new(64, 64);
    let r: i32x2 = transmute(vmov_n_s32(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_s64() {
    let v: i64 = 64;
    let e = i64x1::new(64);
    let r: i64x1 = transmute(vmov_n_s64(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_u8() {
    let v: u8 = 64;
    let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64);
    let r: u8x8 = transmute(vmov_n_u8(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_u16() {
    let v: u16 = 64;
    let e = u16x4::new(64, 64, 64, 64);
    let r: u16x4 = transmute(vmov_n_u16(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_u32() {
    let v: u32 = 64;
    let e = u32x2::new(64, 64);
    let r: u32x2 = transmute(vmov_n_u32(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_u64() {
    let v: u64 = 64;
    let e = u64x1::new(64);
    let r: u64x1 = transmute(vmov_n_u64(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_p8() {
    // Polynomial lanes are compared via their unsigned bit patterns.
    let v: p8 = 64;
    let e = u8x8::new(64, 64, 64, 64, 64, 64, 64, 64);
    let r: u8x8 = transmute(vmov_n_p8(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_p16() {
    // Polynomial lanes are compared via their unsigned bit patterns.
    let v: p16 = 64;
    let e = u16x4::new(64, 64, 64, 64);
    let r: u16x4 = transmute(vmov_n_p16(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmov_n_f32() {
    let v: f32 = 64.0;
    let e = f32x2::new(64.0, 64.0);
    let r: f32x2 = transmute(vmov_n_f32(v));
    assert_eq!(r, e);
}
|
||||
|
||||
// Quad-register vmovq_n_* aliases: all lanes must hold the broadcast scalar.

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_s8() {
    let v: i8 = 64;
    let e = i8x16::new(
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
    );
    let r: i8x16 = transmute(vmovq_n_s8(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_s16() {
    let v: i16 = 64;
    let e = i16x8::new(64, 64, 64, 64, 64, 64, 64, 64);
    let r: i16x8 = transmute(vmovq_n_s16(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_s32() {
    let v: i32 = 64;
    let e = i32x4::new(64, 64, 64, 64);
    let r: i32x4 = transmute(vmovq_n_s32(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_s64() {
    let v: i64 = 64;
    let e = i64x2::new(64, 64);
    let r: i64x2 = transmute(vmovq_n_s64(v));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vmovq_n_u8() {
|
||||
let v: u8 = 42;
|
||||
let v: u8 = 64;
|
||||
let e = u8x16::new(
|
||||
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
|
||||
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
|
||||
);
|
||||
let r: u8x16 = transmute(vmovq_n_u8(v));
|
||||
assert_eq!(r, e);
|
||||
}
|
||||
|
||||
// Quad-register vmovq_n_* aliases (unsigned, polynomial, and float): all
// lanes must hold the broadcast scalar; polynomial results are compared via
// their unsigned bit patterns.

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_u16() {
    let v: u16 = 64;
    let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64);
    let r: u16x8 = transmute(vmovq_n_u16(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_u32() {
    let v: u32 = 64;
    let e = u32x4::new(64, 64, 64, 64);
    let r: u32x4 = transmute(vmovq_n_u32(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_u64() {
    let v: u64 = 64;
    let e = u64x2::new(64, 64);
    let r: u64x2 = transmute(vmovq_n_u64(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_p8() {
    let v: p8 = 64;
    let e = u8x16::new(
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
    );
    let r: u8x16 = transmute(vmovq_n_p8(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_p16() {
    let v: p16 = 64;
    let e = u16x8::new(64, 64, 64, 64, 64, 64, 64, 64);
    let r: u16x8 = transmute(vmovq_n_p16(v));
    assert_eq!(r, e);
}

#[simd_test(enable = "neon")]
unsafe fn test_vmovq_n_f32() {
    let v: f32 = 64.0;
    let e = f32x4::new(64.0, 64.0, 64.0, 64.0);
    let r: f32x4 = transmute(vmovq_n_f32(v));
    assert_eq!(r, e);
}
|
||||
|
||||
#[simd_test(enable = "neon")]
|
||||
unsafe fn test_vgetq_lane_u64() {
|
||||
let v = i64x2::new(1, 2);
|
||||
|
|
|
|||
|
|
@ -394,6 +394,9 @@ fn verify_all_signatures() {
|
|||
"brk" | "__breakpoint" | "udf" | "_prefetch" => continue,
|
||||
_ => {}
|
||||
}
|
||||
// Skip some intrinsics that are present in GCC and Clang but
|
||||
// are missing from the official documentation.
|
||||
let skip_intrinsic_verify = ["vmov_n_p64", "vmovq_n_p64"];
|
||||
let arm = match map.get(rust.name) {
|
||||
Some(i) => i,
|
||||
None => {
|
||||
|
|
@ -408,6 +411,7 @@ fn verify_all_signatures() {
|
|||
&& !rust.file.ends_with("v7.rs\"")
|
||||
&& !rust.file.ends_with("v8.rs\"")
|
||||
&& !rust.file.ends_with("tme.rs\"")
|
||||
&& !skip_intrinsic_verify.contains(&rust.name)
|
||||
{
|
||||
println!(
|
||||
"missing arm definition for {:?} in {}",
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue