diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs
index 9705c4523698..fd6d94f6834c 100644
--- a/library/stdarch/crates/core_arch/src/lib.rs
+++ b/library/stdarch/crates/core_arch/src/lib.rs
@@ -24,6 +24,7 @@
     avx512_target_feature,
     mips_target_feature,
     powerpc_target_feature,
+    loongarch_target_feature,
     wasm_target_feature,
     abi_unadjusted,
     rtm_target_feature,
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
new file mode 100644
index 000000000000..c3d5263c23f5
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
@@ -0,0 +1,6843 @@
+// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen-loongarch/lsx.spec` and run the following command to re-generate this file:
+//
+// ```
+// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsx.spec
+// ```
+
+use super::types::*;
+
+#[allow(improper_ctypes)]
+extern "unadjusted" {
+    #[link_name = "llvm.loongarch.lsx.vsll.b"]
+    fn __lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsll.h"]
+    fn __lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsll.w"]
+    fn __lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsll.d"]
+    fn __lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslli.b"]
+    fn __lsx_vslli_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslli.h"]
+    fn __lsx_vslli_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslli.w"]
+    fn __lsx_vslli_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslli.d"]
+    fn __lsx_vslli_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsra.b"]
+    fn __lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsra.h"]
+    fn __lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsra.w"]
+    fn __lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsra.d"]
+    fn __lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrai.b"]
+    fn __lsx_vsrai_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrai.h"]
+    fn __lsx_vsrai_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrai.w"]
+    fn __lsx_vsrai_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrai.d"]
+    fn __lsx_vsrai_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrar.b"]
+    fn __lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrar.h"]
+    fn __lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrar.w"]
+    fn __lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrar.d"]
+    fn __lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrari.b"]
+    fn __lsx_vsrari_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrari.h"]
+    fn __lsx_vsrari_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrari.w"]
+    fn __lsx_vsrari_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrari.d"]
+    fn __lsx_vsrari_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrl.b"]
+    fn __lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrl.h"]
+    fn __lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrl.w"]
+    fn __lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrl.d"]
+    fn __lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrli.b"]
+    fn __lsx_vsrli_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrli.h"]
+    fn __lsx_vsrli_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrli.w"]
+    fn __lsx_vsrli_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrli.d"]
+    fn __lsx_vsrli_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrlr.b"]
+    fn __lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrlr.h"]
+    fn __lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrlr.w"]
+    fn __lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrlr.d"]
+    fn __lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrlri.b"]
+    fn __lsx_vsrlri_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrlri.h"]
+    fn __lsx_vsrlri_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrlri.w"]
+    fn __lsx_vsrlri_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrlri.d"]
+    fn __lsx_vsrlri_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vbitclr.b"]
+    fn __lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitclr.h"]
+    fn __lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vbitclr.w"]
+    fn __lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vbitclr.d"]
+    fn __lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vbitclri.b"]
+    fn __lsx_vbitclri_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitclri.h"]
+    fn __lsx_vbitclri_h(a: v8u16, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vbitclri.w"]
+    fn __lsx_vbitclri_w(a: v4u32, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vbitclri.d"]
+    fn __lsx_vbitclri_d(a: v2u64, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vbitset.b"]
+    fn __lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitset.h"]
+    fn __lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vbitset.w"]
+    fn __lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vbitset.d"]
+    fn __lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vbitseti.b"]
+    fn __lsx_vbitseti_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitseti.h"]
+    fn __lsx_vbitseti_h(a: v8u16, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vbitseti.w"]
+    fn __lsx_vbitseti_w(a: v4u32, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vbitseti.d"]
+    fn __lsx_vbitseti_d(a: v2u64, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vbitrev.b"]
+    fn __lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitrev.h"]
+    fn __lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vbitrev.w"]
+    fn __lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vbitrev.d"]
+    fn __lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vbitrevi.b"]
+    fn __lsx_vbitrevi_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitrevi.h"]
+    fn __lsx_vbitrevi_h(a: v8u16, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vbitrevi.w"]
+    fn __lsx_vbitrevi_w(a: v4u32, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vbitrevi.d"]
+    fn __lsx_vbitrevi_d(a: v2u64, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vadd.b"]
+    fn __lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vadd.h"]
+    fn __lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vadd.w"]
+    fn __lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vadd.d"]
+    fn __lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddi.bu"]
+    fn __lsx_vaddi_bu(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vaddi.hu"]
+    fn __lsx_vaddi_hu(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddi.wu"]
+    fn __lsx_vaddi_wu(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddi.du"]
+    fn __lsx_vaddi_du(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsub.b"]
+    fn __lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsub.h"]
+    fn __lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsub.w"]
+    fn __lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsub.d"]
+    fn __lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubi.bu"]
+    fn __lsx_vsubi_bu(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsubi.hu"]
+    fn __lsx_vsubi_hu(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsubi.wu"]
+    fn __lsx_vsubi_wu(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsubi.du"]
+    fn __lsx_vsubi_du(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmax.b"]
+    fn __lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmax.h"]
+    fn __lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmax.w"]
+    fn __lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmax.d"]
+    fn __lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.b"]
+    fn __lsx_vmaxi_b(a: v16i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.h"]
+    fn __lsx_vmaxi_h(a: v8i16, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.w"]
+    fn __lsx_vmaxi_w(a: v4i32, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.d"]
+    fn __lsx_vmaxi_d(a: v2i64, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmax.bu"]
+    fn __lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vmax.hu"]
+    fn __lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmax.wu"]
+    fn __lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmax.du"]
+    fn __lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.bu"]
+    fn __lsx_vmaxi_bu(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.hu"]
+    fn __lsx_vmaxi_hu(a: v8u16, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.wu"]
+    fn __lsx_vmaxi_wu(a: v4u32, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmaxi.du"]
+    fn __lsx_vmaxi_du(a: v2u64, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmin.b"]
+    fn __lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmin.h"]
+    fn __lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmin.w"]
+    fn __lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmin.d"]
+    fn __lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmini.b"]
+    fn __lsx_vmini_b(a: v16i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmini.h"]
+    fn __lsx_vmini_h(a: v8i16, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmini.w"]
+    fn __lsx_vmini_w(a: v4i32, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmini.d"]
+    fn __lsx_vmini_d(a: v2i64, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmin.bu"]
+    fn __lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vmin.hu"]
+    fn __lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmin.wu"]
+    fn __lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmin.du"]
+    fn __lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmini.bu"]
+    fn __lsx_vmini_bu(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vmini.hu"]
+    fn __lsx_vmini_hu(a: v8u16, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmini.wu"]
+    fn __lsx_vmini_wu(a: v4u32, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmini.du"]
+    fn __lsx_vmini_du(a: v2u64, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vseq.b"]
+    fn __lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vseq.h"]
+    fn __lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vseq.w"]
+    fn __lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vseq.d"]
+    fn __lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vseqi.b"]
+    fn __lsx_vseqi_b(a: v16i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vseqi.h"]
+    fn __lsx_vseqi_h(a: v8i16, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vseqi.w"]
+    fn __lsx_vseqi_w(a: v4i32, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vseqi.d"]
+    fn __lsx_vseqi_d(a: v2i64, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslti.b"]
+    fn __lsx_vslti_b(a: v16i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslt.b"]
+    fn __lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslt.h"]
+    fn __lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslt.w"]
+    fn __lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslt.d"]
+    fn __lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslti.h"]
+    fn __lsx_vslti_h(a: v8i16, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslti.w"]
+    fn __lsx_vslti_w(a: v4i32, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslti.d"]
+    fn __lsx_vslti_d(a: v2i64, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslt.bu"]
+    fn __lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslt.hu"]
+    fn __lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslt.wu"]
+    fn __lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslt.du"]
+    fn __lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslti.bu"]
+    fn __lsx_vslti_bu(a: v16u8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslti.hu"]
+    fn __lsx_vslti_hu(a: v8u16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslti.wu"]
+    fn __lsx_vslti_wu(a: v4u32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslti.du"]
+    fn __lsx_vslti_du(a: v2u64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsle.b"]
+    fn __lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsle.h"]
+    fn __lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsle.w"]
+    fn __lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsle.d"]
+    fn __lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslei.b"]
+    fn __lsx_vslei_b(a: v16i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslei.h"]
+    fn __lsx_vslei_h(a: v8i16, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslei.w"]
+    fn __lsx_vslei_w(a: v4i32, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslei.d"]
+    fn __lsx_vslei_d(a: v2i64, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsle.bu"]
+    fn __lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsle.hu"]
+    fn __lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsle.wu"]
+    fn __lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsle.du"]
+    fn __lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vslei.bu"]
+    fn __lsx_vslei_bu(a: v16u8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vslei.hu"]
+    fn __lsx_vslei_hu(a: v8u16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vslei.wu"]
+    fn __lsx_vslei_wu(a: v4u32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vslei.du"]
+    fn __lsx_vslei_du(a: v2u64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsat.b"]
+    fn __lsx_vsat_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsat.h"]
+    fn __lsx_vsat_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsat.w"]
+    fn __lsx_vsat_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsat.d"]
+    fn __lsx_vsat_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsat.bu"]
+    fn __lsx_vsat_bu(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vsat.hu"]
+    fn __lsx_vsat_hu(a: v8u16, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vsat.wu"]
+    fn __lsx_vsat_wu(a: v4u32, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vsat.du"]
+    fn __lsx_vsat_du(a: v2u64, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vadda.b"]
+    fn __lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vadda.h"]
+    fn __lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vadda.w"]
+    fn __lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vadda.d"]
+    fn __lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsadd.b"]
+    fn __lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsadd.h"]
+    fn __lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsadd.w"]
+    fn __lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsadd.d"]
+    fn __lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsadd.bu"]
+    fn __lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vsadd.hu"]
+    fn __lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vsadd.wu"]
+    fn __lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vsadd.du"]
+    fn __lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vavg.b"]
+    fn __lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vavg.h"]
+    fn __lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vavg.w"]
+    fn __lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vavg.d"]
+    fn __lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vavg.bu"]
+    fn __lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vavg.hu"]
+    fn __lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vavg.wu"]
+    fn __lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vavg.du"]
+    fn __lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vavgr.b"]
+    fn __lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vavgr.h"]
+    fn __lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vavgr.w"]
+    fn __lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vavgr.d"]
+    fn __lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vavgr.bu"]
+    fn __lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vavgr.hu"]
+    fn __lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vavgr.wu"]
+    fn __lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vavgr.du"]
+    fn __lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vssub.b"]
+    fn __lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssub.h"]
+    fn __lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssub.w"]
+    fn __lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssub.d"]
+    fn __lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssub.bu"]
+    fn __lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssub.hu"]
+    fn __lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssub.wu"]
+    fn __lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vssub.du"]
+    fn __lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vabsd.b"]
+    fn __lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vabsd.h"]
+    fn __lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vabsd.w"]
+    fn __lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vabsd.d"]
+    fn __lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vabsd.bu"]
+    fn __lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vabsd.hu"]
+    fn __lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vabsd.wu"]
+    fn __lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vabsd.du"]
+    fn __lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmul.b"]
+    fn __lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmul.h"]
+    fn __lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmul.w"]
+    fn __lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmul.d"]
+    fn __lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmadd.b"]
+    fn __lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmadd.h"]
+    fn __lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmadd.w"]
+    fn __lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmadd.d"]
+    fn __lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmsub.b"]
+    fn __lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmsub.h"]
+    fn __lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmsub.w"]
+    fn __lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmsub.d"]
+    fn __lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vdiv.b"]
+    fn __lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vdiv.h"]
+    fn __lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vdiv.w"]
+    fn __lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vdiv.d"]
+    fn __lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vdiv.bu"]
+    fn __lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vdiv.hu"]
+    fn __lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vdiv.wu"]
+    fn __lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vdiv.du"]
+    fn __lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.h.b"]
+    fn __lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.w.h"]
+    fn __lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.d.w"]
+    fn __lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.hu.bu"]
+    fn __lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.wu.hu"]
+    fn __lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.du.wu"]
+    fn __lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.h.b"]
+    fn __lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.w.h"]
+    fn __lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.d.w"]
+    fn __lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.hu.bu"]
+    fn __lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.wu.hu"]
+    fn __lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.du.wu"]
+    fn __lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmod.b"]
+    fn __lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmod.h"]
+    fn __lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmod.w"]
+    fn __lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmod.d"]
+    fn __lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmod.bu"]
+    fn __lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vmod.hu"]
+    fn __lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmod.wu"]
+    fn __lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmod.du"]
+    fn __lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vreplve.b"]
+    fn __lsx_vreplve_b(a: v16i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vreplve.h"]
+    fn __lsx_vreplve_h(a: v8i16, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vreplve.w"]
+    fn __lsx_vreplve_w(a: v4i32, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vreplve.d"]
+    fn __lsx_vreplve_d(a: v2i64, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vreplvei.b"]
+    fn __lsx_vreplvei_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vreplvei.h"]
+    fn __lsx_vreplvei_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vreplvei.w"]
+    fn __lsx_vreplvei_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vreplvei.d"]
+    fn __lsx_vreplvei_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vpickev.b"]
+    fn __lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vpickev.h"]
+    fn __lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vpickev.w"]
+    fn __lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vpickev.d"]
+    fn __lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vpickod.b"]
+    fn __lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vpickod.h"]
+    fn __lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vpickod.w"]
+    fn __lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vpickod.d"]
+    fn __lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vilvh.b"]
+    fn __lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vilvh.h"]
+    fn __lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vilvh.w"]
+    fn __lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vilvh.d"]
+    fn __lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vilvl.b"]
+    fn __lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vilvl.h"]
+    fn __lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vilvl.w"]
+    fn __lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vilvl.d"]
+    fn __lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vpackev.b"]
+    fn __lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vpackev.h"]
+    fn __lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vpackev.w"]
+    fn __lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vpackev.d"]
+    fn __lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vpackod.b"]
+    fn __lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vpackod.h"]
+    fn __lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vpackod.w"]
+    fn __lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vpackod.d"]
+    fn __lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vshuf.h"]
+    fn __lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vshuf.w"]
+    fn __lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vshuf.d"]
+    fn __lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vand.v"]
+    fn __lsx_vand_v(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vandi.b"]
+    fn __lsx_vandi_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vor.v"]
+    fn __lsx_vor_v(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vori.b"]
+    fn __lsx_vori_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vnor.v"]
+    fn __lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vnori.b"]
+    fn __lsx_vnori_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vxor.v"]
+    fn __lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vxori.b"]
+    fn __lsx_vxori_b(a: v16u8, b: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitsel.v"]
+    fn __lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vbitseli.b"]
+    fn __lsx_vbitseli_b(a: v16u8, b: v16u8, c: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vshuf4i.b"]
+    fn __lsx_vshuf4i_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vshuf4i.h"]
+    fn __lsx_vshuf4i_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vshuf4i.w"]
+    fn __lsx_vshuf4i_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vreplgr2vr.b"]
+    fn __lsx_vreplgr2vr_b(a: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vreplgr2vr.h"]
+    fn __lsx_vreplgr2vr_h(a: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vreplgr2vr.w"]
+    fn __lsx_vreplgr2vr_w(a: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vreplgr2vr.d"]
+    fn __lsx_vreplgr2vr_d(a: i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vpcnt.b"]
+    fn __lsx_vpcnt_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vpcnt.h"]
+    fn __lsx_vpcnt_h(a: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vpcnt.w"]
+    fn __lsx_vpcnt_w(a: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vpcnt.d"]
+    fn __lsx_vpcnt_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vclo.b"]
+    fn __lsx_vclo_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vclo.h"]
+    fn __lsx_vclo_h(a: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vclo.w"]
+    fn __lsx_vclo_w(a: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vclo.d"]
+    fn __lsx_vclo_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vclz.b"]
+    fn __lsx_vclz_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vclz.h"]
+    fn __lsx_vclz_h(a: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vclz.w"]
+    fn __lsx_vclz_w(a: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vclz.d"]
+    fn __lsx_vclz_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.b"]
+    fn __lsx_vpickve2gr_b(a: v16i8, b: u32) -> i32;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.h"]
+    fn __lsx_vpickve2gr_h(a: v8i16, b: u32) -> i32;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.w"]
+    fn __lsx_vpickve2gr_w(a: v4i32, b: u32) -> i32;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.d"]
+    fn __lsx_vpickve2gr_d(a: v2i64, b: u32) -> i64;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.bu"]
+    fn __lsx_vpickve2gr_bu(a: v16i8, b: u32) -> u32;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.hu"]
+    fn __lsx_vpickve2gr_hu(a: v8i16, b: u32) -> u32;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.wu"]
+    fn __lsx_vpickve2gr_wu(a: v4i32, b: u32) -> u32;
+    #[link_name = "llvm.loongarch.lsx.vpickve2gr.du"]
+    fn __lsx_vpickve2gr_du(a: v2i64, b: u32) -> u64;
+    #[link_name = "llvm.loongarch.lsx.vinsgr2vr.b"]
+    fn __lsx_vinsgr2vr_b(a: v16i8, b: i32, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vinsgr2vr.h"]
+    fn __lsx_vinsgr2vr_h(a: v8i16, b: i32, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vinsgr2vr.w"]
+    fn __lsx_vinsgr2vr_w(a: v4i32, b: i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vinsgr2vr.d"]
+    fn __lsx_vinsgr2vr_d(a: v2i64, b: i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfadd.s"]
+    fn __lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfadd.d"]
+    fn __lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfsub.s"]
+    fn __lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfsub.d"]
+    fn __lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfmul.s"]
+    fn __lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmul.d"]
+    fn __lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfdiv.s"]
+    fn __lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfdiv.d"]
+    fn __lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfcvt.h.s"]
+    fn __lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vfcvt.s.d"]
+    fn __lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmin.s"]
+    fn __lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmin.d"]
+    fn __lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfmina.s"]
+    fn __lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmina.d"]
+    fn __lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfmax.s"]
+    fn __lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmax.d"]
+    fn __lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfmaxa.s"]
+    fn __lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmaxa.d"]
+    fn __lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfclass.s"]
+    fn __lsx_vfclass_s(a: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vfclass.d"]
+    fn __lsx_vfclass_d(a: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfsqrt.s"]
+    fn __lsx_vfsqrt_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfsqrt.d"]
+    fn __lsx_vfsqrt_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfrecip.s"]
+    fn __lsx_vfrecip_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrecip.d"]
+    fn __lsx_vfrecip_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfrint.s"]
+    fn __lsx_vfrint_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrint.d"]
+    fn __lsx_vfrint_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfrsqrt.s"]
+    fn __lsx_vfrsqrt_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrsqrt.d"]
+    fn __lsx_vfrsqrt_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vflogb.s"]
+    fn __lsx_vflogb_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vflogb.d"]
+    fn __lsx_vflogb_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfcvth.s.h"]
+    fn __lsx_vfcvth_s_h(a: v8i16) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfcvth.d.s"]
+    fn __lsx_vfcvth_d_s(a: v4f32) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfcvtl.s.h"]
+    fn __lsx_vfcvtl_s_h(a: v8i16) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfcvtl.d.s"]
+    fn __lsx_vfcvtl_d_s(a: v4f32) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vftint.w.s"]
+    fn __lsx_vftint_w_s(a: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftint.l.d"]
+    fn __lsx_vftint_l_d(a: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftint.wu.s"]
+    fn __lsx_vftint_wu_s(a: v4f32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vftint.lu.d"]
+    fn __lsx_vftint_lu_d(a: v2f64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vftintrz.w.s"]
+    fn __lsx_vftintrz_w_s(a: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrz.l.d"]
+    fn __lsx_vftintrz_l_d(a: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrz.wu.s"]
+    fn __lsx_vftintrz_wu_s(a: v4f32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vftintrz.lu.d"]
+    fn __lsx_vftintrz_lu_d(a: v2f64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vffint.s.w"]
+    fn __lsx_vffint_s_w(a: v4i32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vffint.d.l"]
+    fn __lsx_vffint_d_l(a: v2i64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vffint.s.wu"]
+    fn __lsx_vffint_s_wu(a: v4u32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vffint.d.lu"]
+    fn __lsx_vffint_d_lu(a: v2u64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vandn.v"]
+    fn __lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vneg.b"]
+    fn __lsx_vneg_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vneg.h"]
+    fn __lsx_vneg_h(a: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vneg.w"]
+    fn __lsx_vneg_w(a: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vneg.d"]
+    fn __lsx_vneg_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmuh.b"]
+    fn __lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmuh.h"]
+    fn __lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmuh.w"]
+    fn __lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmuh.d"]
+    fn __lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmuh.bu"]
+    fn __lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vmuh.hu"]
+    fn __lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmuh.wu"]
+    fn __lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmuh.du"]
+    fn __lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vsllwil.h.b"]
+    fn __lsx_vsllwil_h_b(a: v16i8, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsllwil.w.h"]
+    fn __lsx_vsllwil_w_h(a: v8i16, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsllwil.d.w"]
+    fn __lsx_vsllwil_d_w(a: v4i32, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsllwil.hu.bu"]
+    fn __lsx_vsllwil_hu_bu(a: v16u8, b: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vsllwil.wu.hu"]
+    fn __lsx_vsllwil_wu_hu(a: v8u16, b: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vsllwil.du.wu"]
+    fn __lsx_vsllwil_du_wu(a: v4u32, b: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vsran.b.h"]
+    fn __lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsran.h.w"]
+    fn __lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsran.w.d"]
+    fn __lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssran.b.h"]
+    fn __lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssran.h.w"]
+    fn __lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssran.w.d"]
+    fn __lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssran.bu.h"]
+    fn __lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssran.hu.w"]
+    fn __lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssran.wu.d"]
+    fn __lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vsrarn.b.h"]
+    fn __lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrarn.h.w"]
+    fn __lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrarn.w.d"]
+    fn __lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrarn.b.h"]
+    fn __lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrarn.h.w"]
+    fn __lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrarn.w.d"]
+    fn __lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrarn.bu.h"]
+    fn __lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrarn.hu.w"]
+    fn __lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrarn.wu.d"]
+    fn __lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vsrln.b.h"]
+    fn __lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrln.h.w"]
+    fn __lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrln.w.d"]
+    fn __lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrln.bu.h"]
+    fn __lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrln.hu.w"]
+    fn __lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrln.wu.d"]
+    fn __lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vsrlrn.b.h"]
+    fn __lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrlrn.h.w"]
+    fn __lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrlrn.w.d"]
+    fn __lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrlrn.bu.h"]
+    fn __lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrlrn.hu.w"]
+    fn __lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrlrn.wu.d"]
+    fn __lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vfrstpi.b"]
+    fn __lsx_vfrstpi_b(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vfrstpi.h"]
+    fn __lsx_vfrstpi_h(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vfrstp.b"]
+    fn __lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vfrstp.h"]
+    fn __lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vshuf4i.d"]
+    fn __lsx_vshuf4i_d(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vbsrl.v"]
+    fn __lsx_vbsrl_v(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vbsll.v"]
+    fn __lsx_vbsll_v(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vextrins.b"]
+    fn __lsx_vextrins_b(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vextrins.h"]
+    fn __lsx_vextrins_h(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vextrins.w"]
+    fn __lsx_vextrins_w(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vextrins.d"]
+    fn __lsx_vextrins_d(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmskltz.b"]
+    fn __lsx_vmskltz_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmskltz.h"]
+    fn __lsx_vmskltz_h(a: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmskltz.w"]
+    fn __lsx_vmskltz_w(a: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmskltz.d"]
+    fn __lsx_vmskltz_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsigncov.b"]
+    fn __lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsigncov.h"]
+    fn __lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsigncov.w"]
+    fn __lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsigncov.d"]
+    fn __lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfmadd.s"]
+    fn __lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmadd.d"]
+    fn __lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfmsub.s"]
+    fn __lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfmsub.d"]
+    fn __lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfnmadd.s"]
+    fn __lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfnmadd.d"]
+    fn __lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfnmsub.s"]
+    fn __lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfnmsub.d"]
+    fn __lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vftintrne.w.s"]
+    fn __lsx_vftintrne_w_s(a: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrne.l.d"]
+    fn __lsx_vftintrne_l_d(a: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrp.w.s"]
+    fn __lsx_vftintrp_w_s(a: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrp.l.d"]
+    fn __lsx_vftintrp_l_d(a: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrm.w.s"]
+    fn __lsx_vftintrm_w_s(a: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrm.l.d"]
+    fn __lsx_vftintrm_l_d(a: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftint.w.d"]
+    fn __lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vffint.s.l"]
+    fn __lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vftintrz.w.d"]
+    fn __lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrp.w.d"]
+    fn __lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrm.w.d"]
+    fn __lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintrne.w.d"]
+    fn __lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vftintl.l.s"]
+    fn __lsx_vftintl_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftinth.l.s"]
+    fn __lsx_vftinth_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vffinth.d.w"]
+    fn __lsx_vffinth_d_w(a: v4i32) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vffintl.d.w"]
+    fn __lsx_vffintl_d_w(a: v4i32) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vftintrzl.l.s"]
+    fn __lsx_vftintrzl_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrzh.l.s"]
+    fn __lsx_vftintrzh_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrpl.l.s"]
+    fn __lsx_vftintrpl_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrph.l.s"]
+    fn __lsx_vftintrph_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrml.l.s"]
+    fn __lsx_vftintrml_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrmh.l.s"]
+    fn __lsx_vftintrmh_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrnel.l.s"]
+    fn __lsx_vftintrnel_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vftintrneh.l.s"]
+    fn __lsx_vftintrneh_l_s(a: v4f32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfrintrne.s"]
+    fn __lsx_vfrintrne_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrintrne.d"]
+    fn __lsx_vfrintrne_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfrintrz.s"]
+    fn __lsx_vfrintrz_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrintrz.d"]
+    fn __lsx_vfrintrz_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfrintrp.s"]
+    fn __lsx_vfrintrp_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrintrp.d"]
+    fn __lsx_vfrintrp_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vfrintrm.s"]
+    fn __lsx_vfrintrm_s(a: v4f32) -> v4f32;
+    #[link_name = "llvm.loongarch.lsx.vfrintrm.d"]
+    fn __lsx_vfrintrm_d(a: v2f64) -> v2f64;
+    #[link_name = "llvm.loongarch.lsx.vstelm.b"]
+    fn __lsx_vstelm_b(a: v16i8, b: *mut i8, c: i32, d: u32);
+    #[link_name = "llvm.loongarch.lsx.vstelm.h"]
+    fn __lsx_vstelm_h(a: v8i16, b: *mut i8, c: i32, d: u32);
+    #[link_name = "llvm.loongarch.lsx.vstelm.w"]
+    fn __lsx_vstelm_w(a: v4i32, b: *mut i8, c: i32, d: u32);
+    #[link_name = "llvm.loongarch.lsx.vstelm.d"]
+    fn __lsx_vstelm_d(a: v2i64, b: *mut i8, c: i32, d: u32);
+    #[link_name = "llvm.loongarch.lsx.vaddwev.d.w"]
+    fn __lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.w.h"]
+    fn __lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.h.b"]
+    fn __lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.d.w"]
+    fn __lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.w.h"]
+    fn __lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.h.b"]
+    fn __lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.d.wu"]
+    fn __lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.w.hu"]
+    fn __lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.h.bu"]
+    fn __lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.d.wu"]
+    fn __lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.w.hu"]
+    fn __lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.h.bu"]
+    fn __lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.d.wu.w"]
+    fn __lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.w.hu.h"]
+    fn __lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.h.bu.b"]
+    fn __lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.d.wu.w"]
+    fn __lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.w.hu.h"]
+    fn __lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.h.bu.b"]
+    fn __lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.d.w"]
+    fn __lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.w.h"]
+    fn __lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.h.b"]
+    fn __lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.d.w"]
+    fn __lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.w.h"]
+    fn __lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.h.b"]
+    fn __lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.d.wu"]
+    fn __lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.w.hu"]
+    fn __lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.h.bu"]
+    fn __lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.d.wu"]
+    fn __lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.w.hu"]
+    fn __lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.h.bu"]
+    fn __lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.q.d"]
+    fn __lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.q.d"]
+    fn __lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.q.du"]
+    fn __lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.q.du"]
+    fn __lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.q.d"]
+    fn __lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.q.d"]
+    fn __lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwev.q.du"]
+    fn __lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsubwod.q.du"]
+    fn __lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwev.q.du.d"]
+    fn __lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vaddwod.q.du.d"]
+    fn __lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.d.w"]
+    fn __lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.w.h"]
+    fn __lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.h.b"]
+    fn __lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.d.w"]
+    fn __lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.w.h"]
+    fn __lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.h.b"]
+    fn __lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.d.wu"]
+    fn __lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.w.hu"]
+    fn __lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.h.bu"]
+    fn __lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.d.wu"]
+    fn __lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.w.hu"]
+    fn __lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.h.bu"]
+    fn __lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.d.wu.w"]
+    fn __lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.w.hu.h"]
+    fn __lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.h.bu.b"]
+    fn __lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.d.wu.w"]
+    fn __lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.w.hu.h"]
+    fn __lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.h.bu.b"]
+    fn __lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.q.d"]
+    fn __lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.q.d"]
+    fn __lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.q.du"]
+    fn __lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.q.du"]
+    fn __lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwev.q.du.d"]
+    fn __lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmulwod.q.du.d"]
+    fn __lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.q.d"]
+    fn __lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vhaddw.qu.du"]
+    fn __lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.q.d"]
+    fn __lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vhsubw.qu.du"]
+    fn __lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.d.w"]
+    fn __lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.w.h"]
+    fn __lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.h.b"]
+    fn __lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.d.wu"]
+    fn __lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.w.hu"]
+    fn __lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.h.bu"]
+    fn __lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.d.w"]
+    fn __lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.w.h"]
+    fn __lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.h.b"]
+    fn __lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.d.wu"]
+    fn __lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.w.hu"]
+    fn __lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.h.bu"]
+    fn __lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.d.wu.w"]
+    fn __lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.w.hu.h"]
+    fn __lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.h.bu.b"]
+    fn __lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.d.wu.w"]
+    fn __lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.w.hu.h"]
+    fn __lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.h.bu.b"]
+    fn __lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.q.d"]
+    fn __lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.q.d"]
+    fn __lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.q.du"]
+    fn __lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.q.du"]
+    fn __lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwev.q.du.d"]
+    fn __lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmaddwod.q.du.d"]
+    fn __lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vrotr.b"]
+    fn __lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vrotr.h"]
+    fn __lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vrotr.w"]
+    fn __lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vrotr.d"]
+    fn __lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vadd.q"]
+    fn __lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsub.q"]
+    fn __lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vldrepl.b"]
+    fn __lsx_vldrepl_b(a: *const i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vldrepl.h"]
+    fn __lsx_vldrepl_h(a: *const i8, b: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vldrepl.w"]
+    fn __lsx_vldrepl_w(a: *const i8, b: i32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vldrepl.d"]
+    fn __lsx_vldrepl_d(a: *const i8, b: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vmskgez.b"]
+    fn __lsx_vmskgez_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vmsknz.b"]
+    fn __lsx_vmsknz_b(a: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vexth.h.b"]
+    fn __lsx_vexth_h_b(a: v16i8) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vexth.w.h"]
+    fn __lsx_vexth_w_h(a: v8i16) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vexth.d.w"]
+    fn __lsx_vexth_d_w(a: v4i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vexth.q.d"]
+    fn __lsx_vexth_q_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vexth.hu.bu"]
+    fn __lsx_vexth_hu_bu(a: v16u8) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vexth.wu.hu"]
+    fn __lsx_vexth_wu_hu(a: v8u16) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vexth.du.wu"]
+    fn __lsx_vexth_du_wu(a: v4u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vexth.qu.du"]
+    fn __lsx_vexth_qu_du(a: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vrotri.b"]
+    fn __lsx_vrotri_b(a: v16i8, b: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vrotri.h"]
+    fn __lsx_vrotri_h(a: v8i16, b: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vrotri.w"]
+    fn __lsx_vrotri_w(a: v4i32, b: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vrotri.d"]
+    fn __lsx_vrotri_d(a: v2i64, b: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vextl.q.d"]
+    fn __lsx_vextl_q_d(a: v2i64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrlni.b.h"]
+    fn __lsx_vsrlni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrlni.h.w"]
+    fn __lsx_vsrlni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrlni.w.d"]
+    fn __lsx_vsrlni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrlni.d.q"]
+    fn __lsx_vsrlni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrlrni.b.h"]
+    fn __lsx_vsrlrni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrlrni.h.w"]
+    fn __lsx_vsrlrni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrlrni.w.d"]
+    fn __lsx_vsrlrni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrlrni.d.q"]
+    fn __lsx_vsrlrni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.b.h"]
+    fn __lsx_vssrlni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.h.w"]
+    fn __lsx_vssrlni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.w.d"]
+    fn __lsx_vssrlni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.d.q"]
+    fn __lsx_vssrlni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.bu.h"]
+    fn __lsx_vssrlni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.hu.w"]
+    fn __lsx_vssrlni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.wu.d"]
+    fn __lsx_vssrlni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vssrlni.du.q"]
+    fn __lsx_vssrlni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.b.h"]
+    fn __lsx_vssrlrni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.h.w"]
+    fn __lsx_vssrlrni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.w.d"]
+    fn __lsx_vssrlrni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.d.q"]
+    fn __lsx_vssrlrni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.bu.h"]
+    fn __lsx_vssrlrni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.hu.w"]
+    fn __lsx_vssrlrni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.wu.d"]
+    fn __lsx_vssrlrni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vssrlrni.du.q"]
+    fn __lsx_vssrlrni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vsrani.b.h"]
+    fn __lsx_vsrani_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrani.h.w"]
+    fn __lsx_vsrani_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrani.w.d"]
+    fn __lsx_vsrani_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrani.d.q"]
+    fn __lsx_vsrani_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vsrarni.b.h"]
+    fn __lsx_vsrarni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vsrarni.h.w"]
+    fn __lsx_vsrarni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vsrarni.w.d"]
+    fn __lsx_vsrarni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vsrarni.d.q"]
+    fn __lsx_vsrarni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssrani.b.h"]
+    fn __lsx_vssrani_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrani.h.w"]
+    fn __lsx_vssrani_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrani.w.d"]
+    fn __lsx_vssrani_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrani.d.q"]
+    fn __lsx_vssrani_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssrani.bu.h"]
+    fn __lsx_vssrani_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrani.hu.w"]
+    fn __lsx_vssrani_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrani.wu.d"]
+    fn __lsx_vssrani_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vssrani.du.q"]
+    fn __lsx_vssrani_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.b.h"]
+    fn __lsx_vssrarni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.h.w"]
+    fn __lsx_vssrarni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.w.d"]
+    fn __lsx_vssrarni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.d.q"]
+    fn __lsx_vssrarni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.bu.h"]
+    fn __lsx_vssrarni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.hu.w"]
+    fn __lsx_vssrarni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.wu.d"]
+    fn __lsx_vssrarni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    #[link_name = "llvm.loongarch.lsx.vssrarni.du.q"]
+    fn __lsx_vssrarni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.vpermi.w"]
+    fn __lsx_vpermi_w(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vld"]
+    fn __lsx_vld(a: *const i8, b: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vst"]
+    fn __lsx_vst(a: v16i8, b: *mut i8, c: i32);
+    #[link_name = "llvm.loongarch.lsx.vssrlrn.b.h"]
+    fn __lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrlrn.h.w"]
+    fn __lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrlrn.w.d"]
+    fn __lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vssrln.b.h"]
+    fn __lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vssrln.h.w"]
+    fn __lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vssrln.w.d"]
+    fn __lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vorn.v"]
+    fn __lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vldi"]
+    fn __lsx_vldi(a: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vshuf.b"]
+    fn __lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vldx"]
+    fn __lsx_vldx(a: *const i8, b: i64) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vstx"]
+    fn __lsx_vstx(a: v16i8, b: *mut i8, c: i64);
+    #[link_name = "llvm.loongarch.lsx.vextl.qu.du"]
+    fn __lsx_vextl_qu_du(a: v2u64) -> v2u64;
+    #[link_name = "llvm.loongarch.lsx.bnz.b"]
+    fn __lsx_bnz_b(a: v16u8) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bnz.d"]
+    fn __lsx_bnz_d(a: v2u64) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bnz.h"]
+    fn __lsx_bnz_h(a: v8u16) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bnz.v"]
+    fn __lsx_bnz_v(a: v16u8) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bnz.w"]
+    fn __lsx_bnz_w(a: v4u32) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bz.b"]
+    fn __lsx_bz_b(a: v16u8) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bz.d"]
+    fn __lsx_bz_d(a: v2u64) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bz.h"]
+    fn __lsx_bz_h(a: v8u16) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bz.v"]
+    fn __lsx_bz_v(a: v16u8) -> i32;
+    #[link_name = "llvm.loongarch.lsx.bz.w"]
+    fn __lsx_bz_w(a: v4u32) -> i32;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.caf.d"]
+    fn __lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.caf.s"]
+    fn __lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.ceq.d"]
+    fn __lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.ceq.s"]
fn __lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cle.d"] + fn __lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cle.s"] + fn __lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.clt.d"] + fn __lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.clt.s"] + fn __lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cne.d"] + fn __lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cne.s"] + fn __lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cor.d"] + fn __lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cor.s"] + fn __lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cueq.d"] + fn __lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cueq.s"] + fn __lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cule.d"] + fn __lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cule.s"] + fn __lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cult.d"] + fn __lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cult.s"] + fn __lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cun.d"] + fn __lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cune.d"] + fn __lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.cune.s"] + fn __lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.cun.s"] + fn __lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.saf.d"] + fn __lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.saf.s"] + fn __lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.seq.d"] + fn __lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.seq.s"] + fn __lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.sle.d"] + fn __lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.sle.s"] + fn __lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.slt.d"] + fn __lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.slt.s"] + fn __lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.sne.d"] + fn __lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.sne.s"] + fn __lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.sor.d"] + fn __lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.sor.s"] + fn __lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.sueq.d"] + fn __lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.sueq.s"] + fn __lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32; + #[link_name = "llvm.loongarch.lsx.vfcmp.sule.d"] + fn __lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64; + #[link_name = "llvm.loongarch.lsx.vfcmp.sule.s"] + fn __lsx_vfcmp_sule_s(a: 
v4f32, b: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.sult.d"]
+    fn __lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.sult.s"]
+    fn __lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.sun.d"]
+    fn __lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.sune.d"]
+    fn __lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.sune.s"]
+    fn __lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vfcmp.sun.s"]
+    fn __lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32;
+    #[link_name = "llvm.loongarch.lsx.vrepli.b"]
+    fn __lsx_vrepli_b(a: i32) -> v16i8;
+    #[link_name = "llvm.loongarch.lsx.vrepli.d"]
+    fn __lsx_vrepli_d(a: i32) -> v2i64;
+    #[link_name = "llvm.loongarch.lsx.vrepli.h"]
+    fn __lsx_vrepli_h(a: i32) -> v8i16;
+    #[link_name = "llvm.loongarch.lsx.vrepli.w"]
+    fn __lsx_vrepli_w(a: i32) -> v4i32;
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsll_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsll_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsll_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsll_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslli_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vslli_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslli_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vslli_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslli_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslli_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslli_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vslli_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsra_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsra_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsra_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsra_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrai_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vsrai_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrai_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsrai_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrai_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsrai_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrai_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsrai_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsrar_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsrar_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsrar_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsrar_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrari_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vsrari_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrari_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsrari_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrari_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsrari_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrari_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsrari_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsrl_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsrl_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsrl_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsrl_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrli_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vsrli_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrli_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsrli_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrli_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsrli_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrli_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsrli_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsrlr_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsrlr_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsrlr_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsrlr_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlri_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vsrlri_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlri_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsrlri_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlri_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsrlri_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlri_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsrlri_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8 {
+    __lsx_vbitclr_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16 {
+    __lsx_vbitclr_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32 {
+    __lsx_vbitclr_w(a, b)
+}
+
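+// Illustrative sketch (not part of the generated bindings): for the
+// immediate-form wrappers above, `#[rustc_legacy_const_generics(1)]` lets
+// the shift amount be written either as a const generic or as a plain
+// second argument, and `static_assert_uimm_bits!` rejects an out-of-range
+// immediate at compile time. Assuming a caller that already holds LSX
+// vectors:
+//
+// ```
+// unsafe fn shift_both_ways(v: v4i32) -> v4i32 {
+//     let a = lsx_vslli_w::<3>(v); // const-generic form
+//     let b = lsx_vslli_w(v, 3);   // legacy form, rewritten to the same call
+//     lsx_vadd_w(a, b)             // lane-wise add of the two results
+// }
+// ```
+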
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64 {
+    __lsx_vbitclr_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclri_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vbitclri_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclri_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vbitclri_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclri_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vbitclri_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitclri_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vbitclri_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8 {
+    __lsx_vbitset_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16 {
+    __lsx_vbitset_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32 {
+    __lsx_vbitset_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64 {
+    __lsx_vbitset_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitseti_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vbitseti_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitseti_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vbitseti_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitseti_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vbitseti_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitseti_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vbitseti_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8 {
+    __lsx_vbitrev_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16 {
+    __lsx_vbitrev_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32 {
+    __lsx_vbitrev_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64 {
+    __lsx_vbitrev_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrevi_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vbitrevi_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrevi_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vbitrevi_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrevi_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vbitrevi_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vbitrevi_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vbitrevi_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vadd_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vadd_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vadd_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vadd_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vaddi_bu<const IMM5: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vaddi_bu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vaddi_hu<const IMM5: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vaddi_hu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vaddi_wu<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vaddi_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vaddi_du<const IMM5: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vaddi_du(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsub_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsub_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsub_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsub_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsubi_bu<const IMM5: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsubi_bu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsubi_hu<const IMM5: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsubi_hu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsubi_wu<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsubi_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsubi_du<const IMM5: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsubi_du(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vmax_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vmax_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vmax_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vmax_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmaxi_b(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmaxi_h(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmaxi_w(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmaxi_d(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8 {
+    __lsx_vmax_bu(a, b)
+}
+
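+// Illustrative sketch (not part of the generated bindings): the vbit*
+// intrinsics above clear, set, or flip a single bit position in every lane.
+// For example, clearing bit 0 of each byte forces every lane to an even
+// value:
+//
+// ```
+// unsafe fn force_even_bytes(v: v16u8) -> v16u8 {
+//     lsx_vbitclri_b::<0>(v) // clear bit 0 in each of the 16 byte lanes
+// }
+// ```
+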
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16 {
+    __lsx_vmax_hu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32 {
+    __lsx_vmax_wu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64 {
+    __lsx_vmax_du(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_bu<const IMM5: u32>(a: v16u8) -> v16u8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmaxi_bu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_hu<const IMM5: u32>(a: v8u16) -> v8u16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmaxi_hu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmaxi_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaxi_du<const IMM5: u32>(a: v2u64) -> v2u64 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmaxi_du(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vmin_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vmin_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vmin_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vmin_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmini_b(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmini_h(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmini_w(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vmini_d(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8 {
+    __lsx_vmin_bu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16 {
+    __lsx_vmin_hu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32 {
+    __lsx_vmin_wu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64 {
+    __lsx_vmin_du(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_bu<const IMM5: u32>(a: v16u8) -> v16u8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmini_bu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_hu<const IMM5: u32>(a: v8u16) -> v8u16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmini_hu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmini_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmini_du<const IMM5: u32>(a: v2u64) -> v2u64 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vmini_du(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vseq_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vseq_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vseq_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vseq_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseqi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vseqi_b(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseqi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vseqi_h(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseqi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vseqi_w(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vseqi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vseqi_d(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslti_b(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vslt_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vslt_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vslt_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vslt_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslti_h(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslti_w(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslti_d(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8 {
+    __lsx_vslt_bu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16 {
+    __lsx_vslt_hu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32 {
+    __lsx_vslt_wu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64 {
+    __lsx_vslt_du(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_bu<const IMM5: u32>(a: v16u8) -> v16i8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslti_bu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_hu<const IMM5: u32>(a: v8u16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslti_hu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_wu<const IMM5: u32>(a: v4u32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslti_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslti_du<const IMM5: u32>(a: v2u64) -> v2i64 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslti_du(a, IMM5)
+}
+
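+// Illustrative sketch (not part of the generated bindings): the vseq*/vslt*
+// comparisons above return a lane mask, with each lane all ones (-1) where
+// the predicate holds and all zeros otherwise:
+//
+// ```
+// unsafe fn zero_lane_mask(v: v16i8) -> v16i8 {
+//     lsx_vseqi_b::<0>(v) // -1 in every lane of `v` that equals 0, else 0
+// }
+// ```
+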
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsle_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsle_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsle_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsle_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslei_b(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslei_h(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslei_w(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+    static_assert_simm_bits!(IMM_S5, 5);
+    __lsx_vslei_d(a, IMM_S5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8 {
+    __lsx_vsle_bu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16 {
+    __lsx_vsle_hu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32 {
+    __lsx_vsle_wu(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64 {
+    __lsx_vsle_du(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_bu<const IMM5: u32>(a: v16u8) -> v16i8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslei_bu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_hu<const IMM5: u32>(a: v8u16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslei_hu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_wu<const IMM5: u32>(a: v4u32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslei_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vslei_du<const IMM5: u32>(a: v2u64) -> v2i64 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vslei_du(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vsat_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsat_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsat_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsat_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_bu<const IMM3: u32>(a: v16u8) -> v16u8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vsat_bu(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_hu<const IMM4: u32>(a: v8u16) -> v8u16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsat_hu(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsat_wu(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsat_du<const IMM6: u32>(a: v2u64) -> v2u64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsat_du(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vadda_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vadda_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vadda_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vadda_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vsadd_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vsadd_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vsadd_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsadd_d(a, b)
+}
+
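+// Illustrative sketch (not part of the generated bindings): vadd.* wraps on
+// overflow while vsadd.* saturates, which is the usual choice for pixel
+// arithmetic:
+//
+// ```
+// unsafe fn brighten(pixels: v16u8, delta: v16u8) -> v16u8 {
+//     lsx_vsadd_bu(pixels, delta) // clamps at 255 instead of wrapping
+// }
+// ```
+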
+#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vsadd_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vsadd_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vsadd_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vsadd_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vavg_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vavg_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vavg_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vavg_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vavg_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vavg_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vavg_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vavg_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vavgr_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vavgr_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vavgr_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vavgr_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vavgr_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vavgr_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vavgr_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vavgr_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vssub_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vssub_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vssub_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vssub_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vssub_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vssub_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vssub_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vssub_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vabsd_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vabsd_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vabsd_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vabsd_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vabsd_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vabsd_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vabsd_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vabsd_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vmul_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vmul_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vmul_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vmul_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + __lsx_vmadd_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + __lsx_vmadd_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + __lsx_vmadd_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + __lsx_vmadd_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + __lsx_vmsub_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + __lsx_vmsub_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + __lsx_vmsub_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + __lsx_vmsub_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vdiv_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vdiv_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vdiv_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vdiv_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vdiv_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vdiv_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub 
unsafe fn lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vdiv_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vdiv_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vhaddw_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vhaddw_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vhaddw_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16 { + __lsx_vhaddw_hu_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32 { + __lsx_vhaddw_wu_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64 { + __lsx_vhaddw_du_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vhsubw_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vhsubw_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vhsubw_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vhsubw_hu_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vhsubw_wu_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vhsubw_du_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vmod_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vmod_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vmod_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vmod_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] +pub unsafe fn lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vmod_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vmod_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vmod_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vmod_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplve_b(a: v16i8, b: i32) -> v16i8 { + __lsx_vreplve_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplve_h(a: v8i16, b: i32) -> v8i16 { + __lsx_vreplve_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplve_w(a: v4i32, b: i32) -> v4i32 { + __lsx_vreplve_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplve_d(a: v2i64, b: i32) -> v2i64 { + __lsx_vreplve_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplvei_b(a: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vreplvei_b(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplvei_h(a: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM3, 3); + __lsx_vreplvei_h(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplvei_w(a: v4i32) -> v4i32 { + static_assert_uimm_bits!(IMM2, 2); + __lsx_vreplvei_w(a, IMM2) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vreplvei_d(a: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM1, 1); + __lsx_vreplvei_d(a, IMM1) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vpickev_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vpickev_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vpickev_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vpickev_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vpickod_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vpickod_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vpickod_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vpickod_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vilvh_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vilvh_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vilvh_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vilvh_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vilvl_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vilvl_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vilvl_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vilvl_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vpackev_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vpackev_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vpackev_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vpackev_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vpackod_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vpackod_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vpackod_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vpackod_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + __lsx_vshuf_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + __lsx_vshuf_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + __lsx_vshuf_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vand_v(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vand_v(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vandi_b(a: v16u8) -> v16u8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vandi_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vor_v(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vor_v(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vori_b(a: v16u8) -> v16u8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vori_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vnor_v(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vnori_b(a: v16u8) -> v16u8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vnori_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vxor_v(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vxori_b(a: v16u8) -> v16u8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vxori_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + __lsx_vbitsel_v(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vbitseli_b(a: v16u8, b: v16u8) -> v16u8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vbitseli_b(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf4i_b(a: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vshuf4i_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf4i_h(a: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vshuf4i_h(a, IMM8) +} + 
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vshuf4i_w<const IMM8: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM8, 8);
+    __lsx_vshuf4i_w(a, IMM8)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vreplgr2vr_b(a: i32) -> v16i8 {
+    __lsx_vreplgr2vr_b(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vreplgr2vr_h(a: i32) -> v8i16 {
+    __lsx_vreplgr2vr_h(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vreplgr2vr_w(a: i32) -> v4i32 {
+    __lsx_vreplgr2vr_w(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vreplgr2vr_d(a: i64) -> v2i64 {
+    __lsx_vreplgr2vr_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpcnt_b(a: v16i8) -> v16i8 {
+    __lsx_vpcnt_b(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpcnt_h(a: v8i16) -> v8i16 {
+    __lsx_vpcnt_h(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpcnt_w(a: v4i32) -> v4i32 {
+    __lsx_vpcnt_w(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpcnt_d(a: v2i64) -> v2i64 {
+    __lsx_vpcnt_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclo_b(a: v16i8) -> v16i8 {
+    __lsx_vclo_b(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclo_h(a: v8i16) -> v8i16 {
+    __lsx_vclo_h(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclo_w(a: v4i32) -> v4i32 {
+    __lsx_vclo_w(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclo_d(a: v2i64) -> v2i64 {
+    __lsx_vclo_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclz_b(a: v16i8) -> v16i8 {
+    __lsx_vclz_b(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclz_h(a: v8i16) -> v8i16 {
+    __lsx_vclz_h(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclz_w(a: v4i32) -> v4i32 {
+    __lsx_vclz_w(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vclz_d(a: v2i64) -> v2i64 {
+    __lsx_vclz_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_b<const IMM4: u32>(a: v16i8) -> i32 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vpickve2gr_b(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_h<const IMM3: u32>(a: v8i16) -> i32 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vpickve2gr_h(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_w<const IMM2: u32>(a: v4i32) -> i32 {
+    static_assert_uimm_bits!(IMM2, 2);
+    __lsx_vpickve2gr_w(a, IMM2)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_d<const IMM1: u32>(a: v2i64) -> i64 {
+    static_assert_uimm_bits!(IMM1, 1);
+    __lsx_vpickve2gr_d(a, IMM1)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_bu<const IMM4: u32>(a: v16i8) -> u32 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vpickve2gr_bu(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_hu<const IMM3: u32>(a: v8i16) -> u32 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vpickve2gr_hu(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_wu<const IMM2: u32>(a: v4i32) -> u32 {
+    static_assert_uimm_bits!(IMM2, 2);
+    __lsx_vpickve2gr_wu(a, IMM2)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpickve2gr_du<const IMM1: u32>(a: v2i64) -> u64 {
+    static_assert_uimm_bits!(IMM1, 1);
+    __lsx_vpickve2gr_du(a, IMM1)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vinsgr2vr_b<const IMM4: u32>(a: v16i8, b: i32) -> v16i8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vinsgr2vr_b(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vinsgr2vr_h<const IMM3: u32>(a: v8i16, b: i32) -> v8i16 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vinsgr2vr_h(a, b, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vinsgr2vr_w<const IMM2: u32>(a: v4i32, b: i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM2, 2);
+    __lsx_vinsgr2vr_w(a, b, IMM2)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vinsgr2vr_d<const IMM1: u32>(a: v2i64, b: i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM1, 1);
+    __lsx_vinsgr2vr_d(a, b, IMM1)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32 {
+    __lsx_vfadd_s(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64 {
+    __lsx_vfadd_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32 {
+    __lsx_vfsub_s(a, b)
+}
+
lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfsub_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32 { + __lsx_vfmul_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfmul_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32 { + __lsx_vfdiv_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfdiv_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16 { + __lsx_vfcvt_h_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32 { + __lsx_vfcvt_s_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32 { + __lsx_vfmin_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfmin_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32 { + __lsx_vfmina_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfmina_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32 { + __lsx_vfmax_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfmax_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32 { + __lsx_vfmaxa_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64 { + __lsx_vfmaxa_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfclass_s(a: v4f32) -> v4i32 { + __lsx_vfclass_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfclass_d(a: v2f64) -> v2i64 { + __lsx_vfclass_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfsqrt_s(a: v4f32) -> v4f32 { + __lsx_vfsqrt_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfsqrt_d(a: v2f64) -> v2f64 { + __lsx_vfsqrt_d(a) +} + +#[inline] 
+#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrecip_s(a: v4f32) -> v4f32 { + __lsx_vfrecip_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrecip_d(a: v2f64) -> v2f64 { + __lsx_vfrecip_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrint_s(a: v4f32) -> v4f32 { + __lsx_vfrint_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrint_d(a: v2f64) -> v2f64 { + __lsx_vfrint_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrsqrt_s(a: v4f32) -> v4f32 { + __lsx_vfrsqrt_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrsqrt_d(a: v2f64) -> v2f64 { + __lsx_vfrsqrt_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vflogb_s(a: v4f32) -> v4f32 { + __lsx_vflogb_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vflogb_d(a: v2f64) -> v2f64 { + __lsx_vflogb_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcvth_s_h(a: v8i16) -> v4f32 { + __lsx_vfcvth_s_h(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcvth_d_s(a: v4f32) -> v2f64 { + __lsx_vfcvth_d_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcvtl_s_h(a: v8i16) -> v4f32 { + __lsx_vfcvtl_s_h(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcvtl_d_s(a: v4f32) -> v2f64 { + __lsx_vfcvtl_d_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftint_w_s(a: v4f32) -> v4i32 { + __lsx_vftint_w_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftint_l_d(a: v2f64) -> v2i64 { + __lsx_vftint_l_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftint_wu_s(a: v4f32) -> v4u32 { + __lsx_vftint_wu_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftint_lu_d(a: v2f64) -> v2u64 { + __lsx_vftint_lu_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrz_w_s(a: v4f32) -> v4i32 { + __lsx_vftintrz_w_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrz_l_d(a: v2f64) -> v2i64 { + __lsx_vftintrz_l_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrz_wu_s(a: v4f32) -> v4u32 { + __lsx_vftintrz_wu_s(a) +} + +#[inline] +#[target_feature(enable = 
"lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrz_lu_d(a: v2f64) -> v2u64 { + __lsx_vftintrz_lu_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffint_s_w(a: v4i32) -> v4f32 { + __lsx_vffint_s_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffint_d_l(a: v2i64) -> v2f64 { + __lsx_vffint_d_l(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffint_s_wu(a: v4u32) -> v4f32 { + __lsx_vffint_s_wu(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffint_d_lu(a: v2u64) -> v2f64 { + __lsx_vffint_d_lu(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vandn_v(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vneg_b(a: v16i8) -> v16i8 { + __lsx_vneg_b(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vneg_h(a: v8i16) -> v8i16 { + __lsx_vneg_h(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vneg_w(a: v4i32) -> v4i32 { + __lsx_vneg_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vneg_d(a: v2i64) -> v2i64 { + __lsx_vneg_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vmuh_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vmuh_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vmuh_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vmuh_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8 { + __lsx_vmuh_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16 { + __lsx_vmuh_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32 { + __lsx_vmuh_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vmuh_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsllwil_h_b(a: v16i8) -> v8i16 { + 
static_assert_uimm_bits!(IMM3, 3); + __lsx_vsllwil_h_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsllwil_w_h(a: v8i16) -> v4i32 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vsllwil_w_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsllwil_d_w(a: v4i32) -> v2i64 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vsllwil_d_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsllwil_hu_bu(a: v16u8) -> v8u16 { + static_assert_uimm_bits!(IMM3, 3); + __lsx_vsllwil_hu_bu(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsllwil_wu_hu(a: v8u16) -> v4u32 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vsllwil_wu_hu(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsllwil_du_wu(a: v4u32) -> v2u64 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vsllwil_du_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vsran_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16 { + __lsx_vsran_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vsran_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vssran_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16 { + __lsx_vssran_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vssran_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8 { + __lsx_vssran_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16 { + __lsx_vssran_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32 { + __lsx_vssran_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vsrarn_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarn_h_w(a: v4i32, b: 
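`lsx_vsllwil_*` widens each element while shifting it left, with the shift amount as a const generic sized to the source lane (3 bits for bytes, 4 for halfwords, 5 for words). A sketch under the same assumptions; the "low half" reading in the comment reflects my understanding of the VSLLWIL instruction and should be checked against the ISA manual:

```rust
// Sketch only: same crate-level feature gates and target as the first sketch.
use core::arch::loongarch64::*;

#[target_feature(enable = "lsx")]
unsafe fn widen_times_four(v: v16i8) -> v8i16 {
    // Sign-extend the low i8 lanes to i16 while shifting each left by 2
    // (multiply by 4); the 3-bit shift amount is checked at compile time.
    lsx_vsllwil_h_b::<2>(v)
}
```

+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrarn_h_w(a: v4i32, b: 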
v4i32) -> v8i16 { + __lsx_vsrarn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vsrarn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vssrarn_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16 { + __lsx_vssrarn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vssrarn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8 { + __lsx_vssrarn_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16 { + __lsx_vssrarn_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32 { + __lsx_vssrarn_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vsrln_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16 { + __lsx_vsrln_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vsrln_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8 { + __lsx_vssrln_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16 { + __lsx_vssrln_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32 { + __lsx_vssrln_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vsrlrn_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 { + __lsx_vsrlrn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vsrlrn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8 { + __lsx_vssrlrn_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] +pub unsafe fn lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16 { + __lsx_vssrlrn_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32 { + __lsx_vssrlrn_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrstpi_b(a: v16i8, b: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vfrstpi_b(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrstpi_h(a: v8i16, b: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vfrstpi_h(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + __lsx_vfrstp_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + __lsx_vfrstp_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf4i_d(a: v2i64, b: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vshuf4i_d(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vbsrl_v(a: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vbsrl_v(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vbsll_v(a: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vbsll_v(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vextrins_b(a: v16i8, b: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vextrins_b(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vextrins_h(a: v8i16, b: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vextrins_h(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vextrins_w(a: v4i32, b: v4i32) -> v4i32 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vextrins_w(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vextrins_d(a: v2i64, b: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM8, 8); + __lsx_vextrins_d(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmskltz_b(a: v16i8) -> v16i8 { + __lsx_vmskltz_b(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmskltz_h(a: v8i16) -> v8i16 { + __lsx_vmskltz_h(a) +} + +#[inline] 
+#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmskltz_w(a: v4i32) -> v4i32 { + __lsx_vmskltz_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmskltz_d(a: v2i64) -> v2i64 { + __lsx_vmskltz_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vsigncov_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16 { + __lsx_vsigncov_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32 { + __lsx_vsigncov_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vsigncov_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + __lsx_vfmadd_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + __lsx_vfmadd_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + __lsx_vfmsub_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + __lsx_vfmsub_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + __lsx_vfnmadd_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + __lsx_vfnmadd_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + __lsx_vfnmsub_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + __lsx_vfnmsub_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrne_w_s(a: v4f32) -> v4i32 { + __lsx_vftintrne_w_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrne_l_d(a: v2f64) -> v2i64 { + __lsx_vftintrne_l_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrp_w_s(a: v4f32) -> v4i32 { + __lsx_vftintrp_w_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrp_l_d(a: v2f64) -> 
v2i64 { + __lsx_vftintrp_l_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrm_w_s(a: v4f32) -> v4i32 { + __lsx_vftintrm_w_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrm_l_d(a: v2f64) -> v2i64 { + __lsx_vftintrm_l_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32 { + __lsx_vftint_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32 { + __lsx_vffint_s_l(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32 { + __lsx_vftintrz_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32 { + __lsx_vftintrp_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32 { + __lsx_vftintrm_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32 { + __lsx_vftintrne_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintl_l_s(a: v4f32) -> v2i64 { + __lsx_vftintl_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftinth_l_s(a: v4f32) -> v2i64 { + __lsx_vftinth_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffinth_d_w(a: v4i32) -> v2f64 { + __lsx_vffinth_d_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vffintl_d_w(a: v4i32) -> v2f64 { + __lsx_vffintl_d_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrzl_l_s(a: v4f32) -> v2i64 { + __lsx_vftintrzl_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrzh_l_s(a: v4f32) -> v2i64 { + __lsx_vftintrzh_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrpl_l_s(a: v4f32) -> v2i64 { + __lsx_vftintrpl_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrph_l_s(a: v4f32) -> v2i64 { + __lsx_vftintrph_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrml_l_s(a: v4f32) -> v2i64 { + __lsx_vftintrml_l_s(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vftintrmh_l_s(a: v4f32) -> v2i64 { + __lsx_vftintrmh_l_s(a) +} + +#[inline] 
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vftintrnel_l_s(a: v4f32) -> v2i64 {
+    __lsx_vftintrnel_l_s(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vftintrneh_l_s(a: v4f32) -> v2i64 {
+    __lsx_vftintrneh_l_s(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrne_s(a: v4f32) -> v4f32 {
+    __lsx_vfrintrne_s(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrne_d(a: v2f64) -> v2f64 {
+    __lsx_vfrintrne_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrz_s(a: v4f32) -> v4f32 {
+    __lsx_vfrintrz_s(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrz_d(a: v2f64) -> v2f64 {
+    __lsx_vfrintrz_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrp_s(a: v4f32) -> v4f32 {
+    __lsx_vfrintrp_s(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrp_d(a: v2f64) -> v2f64 {
+    __lsx_vfrintrp_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrm_s(a: v4f32) -> v4f32 {
+    __lsx_vfrintrm_s(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vfrintrm_d(a: v2f64) -> v2f64 {
+    __lsx_vfrintrm_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vstelm_b<const IMM_S8: i32, const IMM4: u32>(a: v16i8, mem_addr: *mut i8) {
+    static_assert_simm_bits!(IMM_S8, 8);
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vstelm_b(a, mem_addr, IMM_S8, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vstelm_h<const IMM_S8: i32, const IMM3: u32>(a: v8i16, mem_addr: *mut i8) {
+    static_assert_simm_bits!(IMM_S8, 8);
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vstelm_h(a, mem_addr, IMM_S8, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vstelm_w<const IMM_S8: i32, const IMM2: u32>(a: v4i32, mem_addr: *mut i8) {
+    static_assert_simm_bits!(IMM_S8, 8);
+    static_assert_uimm_bits!(IMM2, 2);
+    __lsx_vstelm_w(a, mem_addr, IMM_S8, IMM2)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vstelm_d<const IMM_S8: i32, const IMM1: u32>(a: v2i64, mem_addr: *mut i8) {
+    static_assert_simm_bits!(IMM_S8, 8);
+    static_assert_uimm_bits!(IMM1, 1);
+    __lsx_vstelm_d(a, mem_addr, IMM_S8, IMM1)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
+    __lsx_vaddwev_d_w(a, b)
+}
+
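`lsx_vstelm_*` stores a single lane to memory; both the signed 8-bit offset immediate and the lane index are const generics (hence `rustc_legacy_const_generics(2, 3)`). A sketch under the same assumptions, using a zero offset so no question of offset scaling arises (the helper name and buffer are hypothetical):

```rust
// Sketch only: same crate-level feature gates and target as the first sketch.
// `buf` must be valid for a 4-byte write.
use core::arch::loongarch64::*;

#[target_feature(enable = "lsx")]
unsafe fn store_lane1(v: v4i32, buf: *mut i8) {
    // Store lane 1 of `v` at `buf`; the first const generic is the signed
    // 8-bit offset immediate (0 here), the second the 2-bit lane index.
    lsx_vstelm_w::<0, 1>(v, buf)
}
```

+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> 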
v4i32 { + __lsx_vaddwev_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vaddwev_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vaddwod_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vaddwod_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vaddwod_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vaddwev_d_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vaddwev_w_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vaddwev_h_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vaddwod_d_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vaddwod_w_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vaddwod_h_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + __lsx_vaddwev_d_wu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + __lsx_vaddwev_w_hu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + __lsx_vaddwev_h_bu_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + __lsx_vaddwod_d_wu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + __lsx_vaddwod_w_hu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + __lsx_vaddwod_h_bu_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vsubwev_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vsubwev_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vsubwev_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vsubwod_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vsubwod_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vsubwod_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vsubwev_d_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vsubwev_w_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vsubwev_h_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vsubwod_d_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vsubwod_w_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vsubwod_h_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vaddwev_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vaddwod_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64 { + __lsx_vaddwev_q_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64 { + __lsx_vaddwod_q_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vsubwev_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vsubwod_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> 
v2i64 { + __lsx_vsubwev_q_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64 { + __lsx_vsubwod_q_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + __lsx_vaddwev_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + __lsx_vaddwod_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vmulwev_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vmulwev_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vmulwev_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64 { + __lsx_vmulwod_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32 { + __lsx_vmulwod_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16 { + __lsx_vmulwod_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vmulwev_d_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vmulwev_w_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vmulwev_h_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { + __lsx_vmulwod_d_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { + __lsx_vmulwod_w_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { + __lsx_vmulwod_h_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + __lsx_vmulwev_d_wu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + __lsx_vmulwev_w_hu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + __lsx_vmulwev_h_bu_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + __lsx_vmulwod_d_wu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + __lsx_vmulwod_w_hu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + __lsx_vmulwod_h_bu_b(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vmulwev_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vmulwod_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64 { + __lsx_vmulwev_q_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64 { + __lsx_vmulwod_q_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + __lsx_vmulwev_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + __lsx_vmulwod_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vhaddw_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vhaddw_qu_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64 { + __lsx_vhsubw_q_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64 { + __lsx_vhsubw_qu_du(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { + __lsx_vmaddwev_d_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { + __lsx_vmaddwev_w_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { + __lsx_vmaddwev_h_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] +pub unsafe fn lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { + __lsx_vmaddwev_d_wu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { + __lsx_vmaddwev_w_hu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { + __lsx_vmaddwev_h_bu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { + __lsx_vmaddwod_d_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { + __lsx_vmaddwod_w_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { + __lsx_vmaddwod_h_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { + __lsx_vmaddwod_d_wu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { + __lsx_vmaddwod_w_hu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { + __lsx_vmaddwod_h_bu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 { + __lsx_vmaddwev_d_wu_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 { + __lsx_vmaddwev_w_hu_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 { + __lsx_vmaddwev_h_bu_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 { + __lsx_vmaddwod_d_wu_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 { + __lsx_vmaddwod_w_hu_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 { + __lsx_vmaddwod_h_bu_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + __lsx_vmaddwev_q_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vmaddwod_q_d(a: 
v2i64, b: v2i64, c: v2i64) -> v2i64 {
+    __lsx_vmaddwod_q_d(a, b, c)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
+    __lsx_vmaddwev_q_du(a, b, c)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
+    __lsx_vmaddwod_q_du(a, b, c)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 {
+    __lsx_vmaddwev_q_du_d(a, b, c)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 {
+    __lsx_vmaddwod_q_du_d(a, b, c)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8 {
+    __lsx_vrotr_b(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16 {
+    __lsx_vrotr_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32 {
+    __lsx_vrotr_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vrotr_d(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vadd_q(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64 {
+    __lsx_vsub_q(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S12, 12);
+    __lsx_vldrepl_b(mem_addr, IMM_S12)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> v8i16 {
+    static_assert_simm_bits!(IMM_S11, 11);
+    __lsx_vldrepl_h(mem_addr, IMM_S11)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> v4i32 {
+    static_assert_simm_bits!(IMM_S10, 10);
+    __lsx_vldrepl_w(mem_addr, IMM_S10)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> v2i64 {
+    static_assert_simm_bits!(IMM_S9, 9);
+    __lsx_vldrepl_d(mem_addr, IMM_S9)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmskgez_b(a: v16i8) -> v16i8 {
+    __lsx_vmskgez_b(a)
+}
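The `vldrepl` group above is the first in this file to take its immediate as a const generic. A minimal sketch of how a caller might broadcast one 32-bit value to every lane; `splat_load` is a hypothetical helper name, and offset 0 sidesteps any question of how larger displacements are scaled:

```rust
use core::arch::loongarch64::*; // nightly: requires #![feature(stdarch_loongarch)]

// Load the 32-bit value at `p` and replicate it into all four lanes.
// The const generic is the signed immediate offset encoded in the
// instruction's si10 field.
#[target_feature(enable = "lsx")]
unsafe fn splat_load(p: *const i8) -> v4i32 {
    lsx_vldrepl_w::<0>(p)
}
```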
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vmsknz_b(a: v16i8) -> v16i8 {
+    __lsx_vmsknz_b(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_h_b(a: v16i8) -> v8i16 {
+    __lsx_vexth_h_b(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_w_h(a: v8i16) -> v4i32 {
+    __lsx_vexth_w_h(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_d_w(a: v4i32) -> v2i64 {
+    __lsx_vexth_d_w(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_q_d(a: v2i64) -> v2i64 {
+    __lsx_vexth_q_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_hu_bu(a: v16u8) -> v8u16 {
+    __lsx_vexth_hu_bu(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_wu_hu(a: v8u16) -> v4u32 {
+    __lsx_vexth_wu_hu(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_du_wu(a: v4u32) -> v2u64 {
+    __lsx_vexth_du_wu(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vexth_qu_du(a: v2u64) -> v2u64 {
+    __lsx_vexth_qu_du(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotri_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM3, 3);
+    __lsx_vrotri_b(a, IMM3)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotri_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vrotri_h(a, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotri_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vrotri_w(a, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vrotri_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vrotri_d(a, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vextl_q_d(a: v2i64) -> v2i64 {
+    __lsx_vextl_q_d(a)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsrlni_b_h(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsrlni_h_w(a, b, IMM5)
+}
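The `static_assert_uimm_bits!` calls turn an out-of-range immediate into a compile-time error rather than a silently truncated encoding. A sketch for the rotate group, with `rotate_lanes` as a hypothetical wrapper:

```rust
use core::arch::loongarch64::*; // nightly: requires #![feature(stdarch_loongarch)]

// Rotate each 32-bit lane right by a compile-time constant. IMM5 must
// fit in 5 bits (0..=31) to match the instruction's ui5 field;
// lsx_vrotri_w::<32> would be rejected at compile time.
#[target_feature(enable = "lsx")]
unsafe fn rotate_lanes(v: v4i32) -> v4i32 {
    lsx_vrotri_w::<7>(v)
}
```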
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsrlni_w_d(a, b, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM7, 7);
+    __lsx_vsrlni_d_q(a, b, IMM7)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlrni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vsrlrni_b_h(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlrni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vsrlrni_h_w(a, b, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlrni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vsrlrni_w_d(a, b, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vsrlrni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM7, 7);
+    __lsx_vsrlrni_d_q(a, b, IMM7)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vssrlni_b_h(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vssrlni_h_w(a, b, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vssrlni_w_d(a, b, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM7, 7);
+    __lsx_vssrlni_d_q(a, b, IMM7)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vssrlni_bu_h(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vssrlni_hu_w(a, b, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vssrlni_wu_d(a, b, IMM6)
+}
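`#[rustc_legacy_const_generics(2)]` on these narrowing shifts maps argument position 2 onto the const generic, so the C-style call shape with a trailing immediate resolves to the same intrinsic as the turbofish form. A sketch; `narrow_by_4` is a hypothetical wrapper, and note the `v8i16` parameter types mirror the C prototype while the instruction itself treats the inputs as 32-bit lanes:

```rust
use core::arch::loongarch64::*; // nightly: requires #![feature(stdarch_loongarch)]

// Shift right by 4, saturate, and pack both inputs into one result.
// lsx_vssrlni_h_w(a, b, 4) would be rewritten to this same call by the
// legacy-const-generics attribute.
#[target_feature(enable = "lsx")]
unsafe fn narrow_by_4(a: v8i16, b: v8i16) -> v8i16 {
    lsx_vssrlni_h_w::<4>(a, b)
}
```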
"117427")] +pub unsafe fn lsx_vssrlni_du_q(a: v2u64, b: v2i64) -> v2u64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vssrlni_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_b_h(a: v16i8, b: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vssrlrni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_h_w(a: v8i16, b: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vssrlrni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_w_d(a: v4i32, b: v4i32) -> v4i32 { + static_assert_uimm_bits!(IMM6, 6); + __lsx_vssrlrni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_d_q(a: v2i64, b: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vssrlrni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_bu_h(a: v16u8, b: v16i8) -> v16u8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vssrlrni_bu_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_hu_w(a: v8u16, b: v8i16) -> v8u16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vssrlrni_hu_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_wu_d(a: v4u32, b: v4i32) -> v4u32 { + static_assert_uimm_bits!(IMM6, 6); + __lsx_vssrlrni_wu_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrlrni_du_q(a: v2u64, b: v2i64) -> v2u64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vssrlrni_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrani_b_h(a: v16i8, b: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vsrani_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrani_h_w(a: v8i16, b: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vsrani_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrani_w_d(a: v4i32, b: v4i32) -> v4i32 { + static_assert_uimm_bits!(IMM6, 6); + __lsx_vsrani_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrani_d_q(a: v2i64, b: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vsrani_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = 
"lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarni_b_h(a: v16i8, b: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vsrarni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarni_h_w(a: v8i16, b: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vsrarni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarni_w_d(a: v4i32, b: v4i32) -> v4i32 { + static_assert_uimm_bits!(IMM6, 6); + __lsx_vsrarni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vsrarni_d_q(a: v2i64, b: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vsrarni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_b_h(a: v16i8, b: v16i8) -> v16i8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vssrani_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_h_w(a: v8i16, b: v8i16) -> v8i16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vssrani_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_w_d(a: v4i32, b: v4i32) -> v4i32 { + static_assert_uimm_bits!(IMM6, 6); + __lsx_vssrani_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_d_q(a: v2i64, b: v2i64) -> v2i64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vssrani_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_bu_h(a: v16u8, b: v16i8) -> v16u8 { + static_assert_uimm_bits!(IMM4, 4); + __lsx_vssrani_bu_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_hu_w(a: v8u16, b: v8i16) -> v8u16 { + static_assert_uimm_bits!(IMM5, 5); + __lsx_vssrani_hu_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_wu_d(a: v4u32, b: v4i32) -> v4u32 { + static_assert_uimm_bits!(IMM6, 6); + __lsx_vssrani_wu_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrani_du_q(a: v2u64, b: v2i64) -> v2u64 { + static_assert_uimm_bits!(IMM7, 7); + __lsx_vssrani_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrarni_b_h(a: v16i8, b: v16i8) -> v16i8 { + 
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vssrarni_b_h(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vssrarni_h_w(a, b, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vssrarni_w_d(a, b, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+    static_assert_uimm_bits!(IMM7, 7);
+    __lsx_vssrarni_d_q(a, b, IMM7)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+    static_assert_uimm_bits!(IMM4, 4);
+    __lsx_vssrarni_bu_h(a, b, IMM4)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+    static_assert_uimm_bits!(IMM5, 5);
+    __lsx_vssrarni_hu_w(a, b, IMM5)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+    static_assert_uimm_bits!(IMM6, 6);
+    __lsx_vssrarni_wu_d(a, b, IMM6)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrarni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+    static_assert_uimm_bits!(IMM7, 7);
+    __lsx_vssrarni_du_q(a, b, IMM7)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vpermi_w<const IMM8: u32>(a: v4i32, b: v4i32) -> v4i32 {
+    static_assert_uimm_bits!(IMM8, 8);
+    __lsx_vpermi_w(a, b, IMM8)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vld<const IMM_S12: i32>(mem_addr: *const i8) -> v16i8 {
+    static_assert_simm_bits!(IMM_S12, 12);
+    __lsx_vld(mem_addr, IMM_S12)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vst<const IMM_S12: i32>(a: v16i8, mem_addr: *mut i8) {
+    static_assert_simm_bits!(IMM_S12, 12);
+    __lsx_vst(a, mem_addr, IMM_S12)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 {
+    __lsx_vssrlrn_b_h(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 {
+    __lsx_vssrlrn_h_w(a, b)
+}
+
+#[inline]
+#[target_feature(enable = "lsx")]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub unsafe fn lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 {
+    __lsx_vssrlrn_w_d(a, b)
+}
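`vld` and `vst` round out the immediate-displacement group: both take a signed 12-bit byte offset as a const generic. A minimal 16-byte copy, assuming `src` and `dst` are valid for 16 bytes:

```rust
use core::arch::loongarch64::*; // nightly: requires #![feature(stdarch_loongarch)]

// Load 16 bytes from `src` and store them to `dst`, both at
// displacement 0. A non-zero displacement must fit in the signed
// 12-bit field or compilation fails.
#[target_feature(enable = "lsx")]
unsafe fn copy16(src: *const i8, dst: *mut i8) {
    let v = lsx_vld::<0>(src);
    lsx_vst::<0>(v, dst);
}
```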
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8 { + __lsx_vssrln_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16 { + __lsx_vssrln_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32 { + __lsx_vssrln_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8 { + __lsx_vorn_v(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vldi() -> v2i64 { + static_assert_simm_bits!(IMM_S13, 13); + __lsx_vldi(IMM_S13) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + __lsx_vshuf_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> v16i8 { + __lsx_vldx(mem_addr, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vstx(a: v16i8, mem_addr: *mut i8, b: i64) { + __lsx_vstx(a, mem_addr, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vextl_qu_du(a: v2u64) -> v2u64 { + __lsx_vextl_qu_du(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bnz_b(a: v16u8) -> i32 { + __lsx_bnz_b(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bnz_d(a: v2u64) -> i32 { + __lsx_bnz_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bnz_h(a: v8u16) -> i32 { + __lsx_bnz_h(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bnz_v(a: v16u8) -> i32 { + __lsx_bnz_v(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bnz_w(a: v4u32) -> i32 { + __lsx_bnz_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bz_b(a: v16u8) -> i32 { + __lsx_bz_b(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bz_d(a: v2u64) -> i32 { + __lsx_bz_d(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bz_h(a: v8u16) -> i32 { + __lsx_bz_h(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bz_v(a: v16u8) -> i32 { + __lsx_bz_v(a) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_bz_w(a: v4u32) -> i32 { + __lsx_bz_w(a) +} + +#[inline] +#[target_feature(enable = "lsx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_caf_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_caf_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_ceq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_ceq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cle_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cle_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_clt_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_clt_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cne_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cne_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cor_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cor_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cueq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cueq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cule_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cule_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cult_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32 { + 
__lsx_vfcmp_cult_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cun_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_cune_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cune_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_cun_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_saf_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_saf_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_seq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_seq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sle_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sle_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_slt_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_slt_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sne_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sne_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sor_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sor_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sueq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] +pub unsafe fn lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sueq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sule_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sule_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sult_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sult_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sun_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64 { + __lsx_vfcmp_sune_d(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sune_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32 { + __lsx_vfcmp_sun_s(a, b) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vrepli_b() -> v16i8 { + static_assert_simm_bits!(IMM_S10, 10); + __lsx_vrepli_b(IMM_S10) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vrepli_d() -> v2i64 { + static_assert_simm_bits!(IMM_S10, 10); + __lsx_vrepli_d(IMM_S10) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vrepli_h() -> v8i16 { + static_assert_simm_bits!(IMM_S10, 10); + __lsx_vrepli_h(IMM_S10) +} + +#[inline] +#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lsx_vrepli_w() -> v4i32 { + static_assert_simm_bits!(IMM_S10, 10); + __lsx_vrepli_w(IMM_S10) +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs new file mode 100644 index 000000000000..2acc75c6472c --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs @@ -0,0 +1,13 @@ +//! 
LoongArch64 LSX intrinsics + +#![allow(non_camel_case_types)] + +#[rustfmt::skip] +mod types; + +#[rustfmt::skip] +mod generated; + +#[rustfmt::skip] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub use self::generated::*; diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs new file mode 100644 index 000000000000..1c92b0a6c8cd --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs @@ -0,0 +1,41 @@ +types! { + /// LOONGARCH-specific 128-bit wide vector of 16 packed `i8`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v16i8(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8); + + /// LOONGARCH-specific 128-bit wide vector of 8 packed `i16`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v8i16(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); + + /// LOONGARCH-specific 128-bit wide vector of 4 packed `i32`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v4i32(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32); + + /// LOONGARCH-specific 128-bit wide vector of 2 packed `i64`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v2i64(pub(crate) i64, pub(crate) i64); + + /// LOONGARCH-specific 128-bit wide vector of 16 packed `u8`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v16u8(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8); + + /// LOONGARCH-specific 128-bit wide vector of 8 packed `u16`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v8u16(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); + + /// LOONGARCH-specific 128-bit wide vector of 4 packed `u32`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v4u32(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32); + + /// LOONGARCH-specific 128-bit wide vector of 2 packed `u64`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v2u64(pub(crate) u64, pub(crate) u64); + + /// LOONGARCH-specific 128-bit wide vector of 4 packed `f32`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v4f32(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32); + + /// LOONGARCH-specific 128-bit wide vector of 2 packed `f64`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v2f64(pub(crate) f64, pub(crate) f64); +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs new file mode 100644 index 000000000000..9c27c2e7919e --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs @@ -0,0 +1,6 @@ +//! 
`LoongArch` intrinsics + +mod lsx; + +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub use self::lsx::*; diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index da3a0bb940d7..7da359465541 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -263,6 +263,16 @@ pub mod arch { pub mod nvptx { pub use crate::core_arch::nvptx::*; } + + /// Platform-specific intrinsics for the `loongarch` platform. + /// + /// See the [module documentation](../index.html) for more details. + #[cfg(any(target_arch = "loongarch64", doc))] + #[doc(cfg(target_arch = "loongarch64"))] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub mod loongarch64 { + pub use crate::core_arch::loongarch64::*; + } } #[cfg(any(target_arch = "x86", target_arch = "x86_64", doc))] @@ -306,3 +316,7 @@ mod powerpc64; #[cfg(any(target_arch = "nvptx64", doc))] #[doc(cfg(target_arch = "nvptx64"))] mod nvptx; + +#[cfg(any(target_arch = "loongarch64", doc))] +#[doc(cfg(target_arch = "loongarch64"))] +mod loongarch64; diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec b/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec new file mode 100644 index 000000000000..427985df9a28 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec @@ -0,0 +1,3585 @@ +// This code is automatically generated. DO NOT MODIFY. +// ``` +// OUT_DIR=`pwd`/crates/stdarch-gen-loongarch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsxintrin.h +// ``` + +/// lsx_vsll_b +name = lsx_vsll_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsll_h +name = lsx_vsll_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsll_w +name = lsx_vsll_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsll_d +name = lsx_vsll_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vslli_b +name = lsx_vslli_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// lsx_vslli_h +name = lsx_vslli_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vslli_w +name = lsx_vslli_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vslli_d +name = lsx_vslli_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vsra_b +name = lsx_vsra_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsra_h +name = lsx_vsra_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsra_w +name = lsx_vsra_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsra_d +name = lsx_vsra_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsrai_b +name = lsx_vsrai_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// lsx_vsrai_h +name = lsx_vsrai_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vsrai_w +name = lsx_vsrai_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vsrai_d +name = lsx_vsrai_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vsrar_b +name = lsx_vsrar_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsrar_h +name = lsx_vsrar_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsrar_w +name = lsx_vsrar_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsrar_d +name = lsx_vsrar_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsrari_b +name = lsx_vsrari_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// 
lsx_vsrari_h +name = lsx_vsrari_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vsrari_w +name = lsx_vsrari_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vsrari_d +name = lsx_vsrari_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vsrl_b +name = lsx_vsrl_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsrl_h +name = lsx_vsrl_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsrl_w +name = lsx_vsrl_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsrl_d +name = lsx_vsrl_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsrli_b +name = lsx_vsrli_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// lsx_vsrli_h +name = lsx_vsrli_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vsrli_w +name = lsx_vsrli_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vsrli_d +name = lsx_vsrli_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vsrlr_b +name = lsx_vsrlr_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsrlr_h +name = lsx_vsrlr_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsrlr_w +name = lsx_vsrlr_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsrlr_d +name = lsx_vsrlr_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsrlri_b +name = lsx_vsrlri_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// lsx_vsrlri_h +name = lsx_vsrlri_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vsrlri_w +name = lsx_vsrlri_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vsrlri_d +name = lsx_vsrlri_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vbitclr_b +name = lsx_vbitclr_b +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vbitclr_h +name = lsx_vbitclr_h +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vbitclr_w +name = lsx_vbitclr_w +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vbitclr_d +name = lsx_vbitclr_d +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vbitclri_b +name = lsx_vbitclri_b +asm-fmts = vd, vj, ui3 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vbitclri_h +name = lsx_vbitclri_h +asm-fmts = vd, vj, ui4 +data-types = UV8HI, UV8HI, UQI + +/// lsx_vbitclri_w +name = lsx_vbitclri_w +asm-fmts = vd, vj, ui5 +data-types = UV4SI, UV4SI, UQI + +/// lsx_vbitclri_d +name = lsx_vbitclri_d +asm-fmts = vd, vj, ui6 +data-types = UV2DI, UV2DI, UQI + +/// lsx_vbitset_b +name = lsx_vbitset_b +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vbitset_h +name = lsx_vbitset_h +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vbitset_w +name = lsx_vbitset_w +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vbitset_d +name = lsx_vbitset_d +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vbitseti_b +name = lsx_vbitseti_b +asm-fmts = vd, vj, ui3 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vbitseti_h +name = lsx_vbitseti_h +asm-fmts = vd, vj, ui4 +data-types = UV8HI, UV8HI, UQI + +/// lsx_vbitseti_w +name = lsx_vbitseti_w +asm-fmts = vd, vj, ui5 +data-types = UV4SI, UV4SI, UQI + +/// lsx_vbitseti_d +name = lsx_vbitseti_d +asm-fmts = vd, vj, ui6 +data-types = UV2DI, UV2DI, UQI + +/// lsx_vbitrev_b +name = lsx_vbitrev_b +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vbitrev_h +name = lsx_vbitrev_h +asm-fmts = 
vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vbitrev_w +name = lsx_vbitrev_w +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vbitrev_d +name = lsx_vbitrev_d +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vbitrevi_b +name = lsx_vbitrevi_b +asm-fmts = vd, vj, ui3 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vbitrevi_h +name = lsx_vbitrevi_h +asm-fmts = vd, vj, ui4 +data-types = UV8HI, UV8HI, UQI + +/// lsx_vbitrevi_w +name = lsx_vbitrevi_w +asm-fmts = vd, vj, ui5 +data-types = UV4SI, UV4SI, UQI + +/// lsx_vbitrevi_d +name = lsx_vbitrevi_d +asm-fmts = vd, vj, ui6 +data-types = UV2DI, UV2DI, UQI + +/// lsx_vadd_b +name = lsx_vadd_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vadd_h +name = lsx_vadd_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vadd_w +name = lsx_vadd_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vadd_d +name = lsx_vadd_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vaddi_bu +name = lsx_vaddi_bu +asm-fmts = vd, vj, ui5 +data-types = V16QI, V16QI, UQI + +/// lsx_vaddi_hu +name = lsx_vaddi_hu +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, UQI + +/// lsx_vaddi_wu +name = lsx_vaddi_wu +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vaddi_du +name = lsx_vaddi_du +asm-fmts = vd, vj, ui5 +data-types = V2DI, V2DI, UQI + +/// lsx_vsub_b +name = lsx_vsub_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsub_h +name = lsx_vsub_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsub_w +name = lsx_vsub_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsub_d +name = lsx_vsub_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsubi_bu +name = lsx_vsubi_bu +asm-fmts = vd, vj, ui5 +data-types = V16QI, V16QI, UQI + +/// lsx_vsubi_hu +name = lsx_vsubi_hu +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, UQI + +/// lsx_vsubi_wu +name = lsx_vsubi_wu +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vsubi_du +name = lsx_vsubi_du +asm-fmts = vd, vj, ui5 +data-types = V2DI, V2DI, UQI + +/// lsx_vmax_b +name = lsx_vmax_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vmax_h +name = lsx_vmax_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vmax_w +name = lsx_vmax_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vmax_d +name = lsx_vmax_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmaxi_b +name = lsx_vmaxi_b +asm-fmts = vd, vj, si5 +data-types = V16QI, V16QI, QI + +/// lsx_vmaxi_h +name = lsx_vmaxi_h +asm-fmts = vd, vj, si5 +data-types = V8HI, V8HI, QI + +/// lsx_vmaxi_w +name = lsx_vmaxi_w +asm-fmts = vd, vj, si5 +data-types = V4SI, V4SI, QI + +/// lsx_vmaxi_d +name = lsx_vmaxi_d +asm-fmts = vd, vj, si5 +data-types = V2DI, V2DI, QI + +/// lsx_vmax_bu +name = lsx_vmax_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vmax_hu +name = lsx_vmax_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vmax_wu +name = lsx_vmax_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vmax_du +name = lsx_vmax_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vmaxi_bu +name = lsx_vmaxi_bu +asm-fmts = vd, vj, ui5 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vmaxi_hu +name = lsx_vmaxi_hu +asm-fmts = vd, vj, ui5 +data-types = UV8HI, UV8HI, UQI + +/// lsx_vmaxi_wu +name = lsx_vmaxi_wu +asm-fmts = vd, vj, ui5 +data-types = UV4SI, 
UV4SI, UQI + +/// lsx_vmaxi_du +name = lsx_vmaxi_du +asm-fmts = vd, vj, ui5 +data-types = UV2DI, UV2DI, UQI + +/// lsx_vmin_b +name = lsx_vmin_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vmin_h +name = lsx_vmin_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vmin_w +name = lsx_vmin_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vmin_d +name = lsx_vmin_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmini_b +name = lsx_vmini_b +asm-fmts = vd, vj, si5 +data-types = V16QI, V16QI, QI + +/// lsx_vmini_h +name = lsx_vmini_h +asm-fmts = vd, vj, si5 +data-types = V8HI, V8HI, QI + +/// lsx_vmini_w +name = lsx_vmini_w +asm-fmts = vd, vj, si5 +data-types = V4SI, V4SI, QI + +/// lsx_vmini_d +name = lsx_vmini_d +asm-fmts = vd, vj, si5 +data-types = V2DI, V2DI, QI + +/// lsx_vmin_bu +name = lsx_vmin_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vmin_hu +name = lsx_vmin_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vmin_wu +name = lsx_vmin_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vmin_du +name = lsx_vmin_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vmini_bu +name = lsx_vmini_bu +asm-fmts = vd, vj, ui5 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vmini_hu +name = lsx_vmini_hu +asm-fmts = vd, vj, ui5 +data-types = UV8HI, UV8HI, UQI + +/// lsx_vmini_wu +name = lsx_vmini_wu +asm-fmts = vd, vj, ui5 +data-types = UV4SI, UV4SI, UQI + +/// lsx_vmini_du +name = lsx_vmini_du +asm-fmts = vd, vj, ui5 +data-types = UV2DI, UV2DI, UQI + +/// lsx_vseq_b +name = lsx_vseq_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vseq_h +name = lsx_vseq_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vseq_w +name = lsx_vseq_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vseq_d +name = lsx_vseq_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vseqi_b +name = lsx_vseqi_b +asm-fmts = vd, vj, si5 +data-types = V16QI, V16QI, QI + +/// lsx_vseqi_h +name = lsx_vseqi_h +asm-fmts = vd, vj, si5 +data-types = V8HI, V8HI, QI + +/// lsx_vseqi_w +name = lsx_vseqi_w +asm-fmts = vd, vj, si5 +data-types = V4SI, V4SI, QI + +/// lsx_vseqi_d +name = lsx_vseqi_d +asm-fmts = vd, vj, si5 +data-types = V2DI, V2DI, QI + +/// lsx_vslti_b +name = lsx_vslti_b +asm-fmts = vd, vj, si5 +data-types = V16QI, V16QI, QI + +/// lsx_vslt_b +name = lsx_vslt_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vslt_h +name = lsx_vslt_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vslt_w +name = lsx_vslt_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vslt_d +name = lsx_vslt_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vslti_h +name = lsx_vslti_h +asm-fmts = vd, vj, si5 +data-types = V8HI, V8HI, QI + +/// lsx_vslti_w +name = lsx_vslti_w +asm-fmts = vd, vj, si5 +data-types = V4SI, V4SI, QI + +/// lsx_vslti_d +name = lsx_vslti_d +asm-fmts = vd, vj, si5 +data-types = V2DI, V2DI, QI + +/// lsx_vslt_bu +name = lsx_vslt_bu +asm-fmts = vd, vj, vk +data-types = V16QI, UV16QI, UV16QI + +/// lsx_vslt_hu +name = lsx_vslt_hu +asm-fmts = vd, vj, vk +data-types = V8HI, UV8HI, UV8HI + +/// lsx_vslt_wu +name = lsx_vslt_wu +asm-fmts = vd, vj, vk +data-types = V4SI, UV4SI, UV4SI + +/// lsx_vslt_du +name = lsx_vslt_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vslti_bu +name = lsx_vslti_bu +asm-fmts = vd, vj, ui5 
+data-types = V16QI, UV16QI, UQI + +/// lsx_vslti_hu +name = lsx_vslti_hu +asm-fmts = vd, vj, ui5 +data-types = V8HI, UV8HI, UQI + +/// lsx_vslti_wu +name = lsx_vslti_wu +asm-fmts = vd, vj, ui5 +data-types = V4SI, UV4SI, UQI + +/// lsx_vslti_du +name = lsx_vslti_du +asm-fmts = vd, vj, ui5 +data-types = V2DI, UV2DI, UQI + +/// lsx_vsle_b +name = lsx_vsle_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsle_h +name = lsx_vsle_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsle_w +name = lsx_vsle_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsle_d +name = lsx_vsle_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vslei_b +name = lsx_vslei_b +asm-fmts = vd, vj, si5 +data-types = V16QI, V16QI, QI + +/// lsx_vslei_h +name = lsx_vslei_h +asm-fmts = vd, vj, si5 +data-types = V8HI, V8HI, QI + +/// lsx_vslei_w +name = lsx_vslei_w +asm-fmts = vd, vj, si5 +data-types = V4SI, V4SI, QI + +/// lsx_vslei_d +name = lsx_vslei_d +asm-fmts = vd, vj, si5 +data-types = V2DI, V2DI, QI + +/// lsx_vsle_bu +name = lsx_vsle_bu +asm-fmts = vd, vj, vk +data-types = V16QI, UV16QI, UV16QI + +/// lsx_vsle_hu +name = lsx_vsle_hu +asm-fmts = vd, vj, vk +data-types = V8HI, UV8HI, UV8HI + +/// lsx_vsle_wu +name = lsx_vsle_wu +asm-fmts = vd, vj, vk +data-types = V4SI, UV4SI, UV4SI + +/// lsx_vsle_du +name = lsx_vsle_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vslei_bu +name = lsx_vslei_bu +asm-fmts = vd, vj, ui5 +data-types = V16QI, UV16QI, UQI + +/// lsx_vslei_hu +name = lsx_vslei_hu +asm-fmts = vd, vj, ui5 +data-types = V8HI, UV8HI, UQI + +/// lsx_vslei_wu +name = lsx_vslei_wu +asm-fmts = vd, vj, ui5 +data-types = V4SI, UV4SI, UQI + +/// lsx_vslei_du +name = lsx_vslei_du +asm-fmts = vd, vj, ui5 +data-types = V2DI, UV2DI, UQI + +/// lsx_vsat_b +name = lsx_vsat_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// lsx_vsat_h +name = lsx_vsat_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vsat_w +name = lsx_vsat_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vsat_d +name = lsx_vsat_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vsat_bu +name = lsx_vsat_bu +asm-fmts = vd, vj, ui3 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vsat_hu +name = lsx_vsat_hu +asm-fmts = vd, vj, ui4 +data-types = UV8HI, UV8HI, UQI + +/// lsx_vsat_wu +name = lsx_vsat_wu +asm-fmts = vd, vj, ui5 +data-types = UV4SI, UV4SI, UQI + +/// lsx_vsat_du +name = lsx_vsat_du +asm-fmts = vd, vj, ui6 +data-types = UV2DI, UV2DI, UQI + +/// lsx_vadda_b +name = lsx_vadda_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vadda_h +name = lsx_vadda_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vadda_w +name = lsx_vadda_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vadda_d +name = lsx_vadda_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsadd_b +name = lsx_vsadd_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsadd_h +name = lsx_vsadd_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsadd_w +name = lsx_vsadd_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsadd_d +name = lsx_vsadd_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsadd_bu +name = lsx_vsadd_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vsadd_hu +name = lsx_vsadd_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vsadd_wu +name = 
lsx_vsadd_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vsadd_du +name = lsx_vsadd_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vavg_b +name = lsx_vavg_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vavg_h +name = lsx_vavg_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vavg_w +name = lsx_vavg_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vavg_d +name = lsx_vavg_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vavg_bu +name = lsx_vavg_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vavg_hu +name = lsx_vavg_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vavg_wu +name = lsx_vavg_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vavg_du +name = lsx_vavg_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vavgr_b +name = lsx_vavgr_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vavgr_h +name = lsx_vavgr_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vavgr_w +name = lsx_vavgr_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vavgr_d +name = lsx_vavgr_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vavgr_bu +name = lsx_vavgr_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vavgr_hu +name = lsx_vavgr_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vavgr_wu +name = lsx_vavgr_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vavgr_du +name = lsx_vavgr_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vssub_b +name = lsx_vssub_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vssub_h +name = lsx_vssub_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vssub_w +name = lsx_vssub_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vssub_d +name = lsx_vssub_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vssub_bu +name = lsx_vssub_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vssub_hu +name = lsx_vssub_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vssub_wu +name = lsx_vssub_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vssub_du +name = lsx_vssub_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vabsd_b +name = lsx_vabsd_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vabsd_h +name = lsx_vabsd_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vabsd_w +name = lsx_vabsd_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vabsd_d +name = lsx_vabsd_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vabsd_bu +name = lsx_vabsd_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vabsd_hu +name = lsx_vabsd_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vabsd_wu +name = lsx_vabsd_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vabsd_du +name = lsx_vabsd_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vmul_b +name = lsx_vmul_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vmul_h +name = lsx_vmul_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vmul_w +name = lsx_vmul_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vmul_d +name = lsx_vmul_d +asm-fmts 
= vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmadd_b +name = lsx_vmadd_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI, V16QI + +/// lsx_vmadd_h +name = lsx_vmadd_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI, V8HI + +/// lsx_vmadd_w +name = lsx_vmadd_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI, V4SI + +/// lsx_vmadd_d +name = lsx_vmadd_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI, V2DI + +/// lsx_vmsub_b +name = lsx_vmsub_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI, V16QI + +/// lsx_vmsub_h +name = lsx_vmsub_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI, V8HI + +/// lsx_vmsub_w +name = lsx_vmsub_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI, V4SI + +/// lsx_vmsub_d +name = lsx_vmsub_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI, V2DI + +/// lsx_vdiv_b +name = lsx_vdiv_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vdiv_h +name = lsx_vdiv_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vdiv_w +name = lsx_vdiv_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vdiv_d +name = lsx_vdiv_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vdiv_bu +name = lsx_vdiv_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vdiv_hu +name = lsx_vdiv_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vdiv_wu +name = lsx_vdiv_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vdiv_du +name = lsx_vdiv_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vhaddw_h_b +name = lsx_vhaddw_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vhaddw_w_h +name = lsx_vhaddw_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vhaddw_d_w +name = lsx_vhaddw_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vhaddw_hu_bu +name = lsx_vhaddw_hu_bu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV16QI, UV16QI + +/// lsx_vhaddw_wu_hu +name = lsx_vhaddw_wu_hu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV8HI, UV8HI + +/// lsx_vhaddw_du_wu +name = lsx_vhaddw_du_wu +asm-fmts = vd, vj, vk +data-types = UV2DI, UV4SI, UV4SI + +/// lsx_vhsubw_h_b +name = lsx_vhsubw_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vhsubw_w_h +name = lsx_vhsubw_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vhsubw_d_w +name = lsx_vhsubw_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vhsubw_hu_bu +name = lsx_vhsubw_hu_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vhsubw_wu_hu +name = lsx_vhsubw_wu_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vhsubw_du_wu +name = lsx_vhsubw_du_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vmod_b +name = lsx_vmod_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vmod_h +name = lsx_vmod_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vmod_w +name = lsx_vmod_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vmod_d +name = lsx_vmod_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmod_bu +name = lsx_vmod_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vmod_hu +name = lsx_vmod_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vmod_wu +name = lsx_vmod_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vmod_du +name = lsx_vmod_du +asm-fmts = vd, vj, vk 
+data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vreplve_b +name = lsx_vreplve_b +asm-fmts = vd, vj, rk +data-types = V16QI, V16QI, SI + +/// lsx_vreplve_h +name = lsx_vreplve_h +asm-fmts = vd, vj, rk +data-types = V8HI, V8HI, SI + +/// lsx_vreplve_w +name = lsx_vreplve_w +asm-fmts = vd, vj, rk +data-types = V4SI, V4SI, SI + +/// lsx_vreplve_d +name = lsx_vreplve_d +asm-fmts = vd, vj, rk +data-types = V2DI, V2DI, SI + +/// lsx_vreplvei_b +name = lsx_vreplvei_b +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, UQI + +/// lsx_vreplvei_h +name = lsx_vreplvei_h +asm-fmts = vd, vj, ui3 +data-types = V8HI, V8HI, UQI + +/// lsx_vreplvei_w +name = lsx_vreplvei_w +asm-fmts = vd, vj, ui2 +data-types = V4SI, V4SI, UQI + +/// lsx_vreplvei_d +name = lsx_vreplvei_d +asm-fmts = vd, vj, ui1 +data-types = V2DI, V2DI, UQI + +/// lsx_vpickev_b +name = lsx_vpickev_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vpickev_h +name = lsx_vpickev_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vpickev_w +name = lsx_vpickev_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vpickev_d +name = lsx_vpickev_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vpickod_b +name = lsx_vpickod_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vpickod_h +name = lsx_vpickod_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vpickod_w +name = lsx_vpickod_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vpickod_d +name = lsx_vpickod_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vilvh_b +name = lsx_vilvh_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vilvh_h +name = lsx_vilvh_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vilvh_w +name = lsx_vilvh_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vilvh_d +name = lsx_vilvh_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vilvl_b +name = lsx_vilvl_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vilvl_h +name = lsx_vilvl_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vilvl_w +name = lsx_vilvl_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vilvl_d +name = lsx_vilvl_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vpackev_b +name = lsx_vpackev_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vpackev_h +name = lsx_vpackev_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vpackev_w +name = lsx_vpackev_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vpackev_d +name = lsx_vpackev_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vpackod_b +name = lsx_vpackod_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vpackod_h +name = lsx_vpackod_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vpackod_w +name = lsx_vpackod_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vpackod_d +name = lsx_vpackod_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vshuf_h +name = lsx_vshuf_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI, V8HI + +/// lsx_vshuf_w +name = lsx_vshuf_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI, V4SI + +/// lsx_vshuf_d +name = lsx_vshuf_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI, V2DI + +/// lsx_vand_v +name = lsx_vand_v +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vandi_b +name = 
lsx_vandi_b +asm-fmts = vd, vj, ui8 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vor_v +name = lsx_vor_v +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vori_b +name = lsx_vori_b +asm-fmts = vd, vj, ui8 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vnor_v +name = lsx_vnor_v +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vnori_b +name = lsx_vnori_b +asm-fmts = vd, vj, ui8 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vxor_v +name = lsx_vxor_v +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vxori_b +name = lsx_vxori_b +asm-fmts = vd, vj, ui8 +data-types = UV16QI, UV16QI, UQI + +/// lsx_vbitsel_v +name = lsx_vbitsel_v +asm-fmts = vd, vj, vk, va +data-types = UV16QI, UV16QI, UV16QI, UV16QI + +/// lsx_vbitseli_b +name = lsx_vbitseli_b +asm-fmts = vd, vj, ui8 +data-types = UV16QI, UV16QI, UV16QI, USI + +/// lsx_vshuf4i_b +name = lsx_vshuf4i_b +asm-fmts = vd, vj, ui8 +data-types = V16QI, V16QI, USI + +/// lsx_vshuf4i_h +name = lsx_vshuf4i_h +asm-fmts = vd, vj, ui8 +data-types = V8HI, V8HI, USI + +/// lsx_vshuf4i_w +name = lsx_vshuf4i_w +asm-fmts = vd, vj, ui8 +data-types = V4SI, V4SI, USI + +/// lsx_vreplgr2vr_b +name = lsx_vreplgr2vr_b +asm-fmts = vd, rj +data-types = V16QI, SI + +/// lsx_vreplgr2vr_h +name = lsx_vreplgr2vr_h +asm-fmts = vd, rj +data-types = V8HI, SI + +/// lsx_vreplgr2vr_w +name = lsx_vreplgr2vr_w +asm-fmts = vd, rj +data-types = V4SI, SI + +/// lsx_vreplgr2vr_d +name = lsx_vreplgr2vr_d +asm-fmts = vd, rj +data-types = V2DI, DI + +/// lsx_vpcnt_b +name = lsx_vpcnt_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vpcnt_h +name = lsx_vpcnt_h +asm-fmts = vd, vj +data-types = V8HI, V8HI + +/// lsx_vpcnt_w +name = lsx_vpcnt_w +asm-fmts = vd, vj +data-types = V4SI, V4SI + +/// lsx_vpcnt_d +name = lsx_vpcnt_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vclo_b +name = lsx_vclo_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vclo_h +name = lsx_vclo_h +asm-fmts = vd, vj +data-types = V8HI, V8HI + +/// lsx_vclo_w +name = lsx_vclo_w +asm-fmts = vd, vj +data-types = V4SI, V4SI + +/// lsx_vclo_d +name = lsx_vclo_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vclz_b +name = lsx_vclz_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vclz_h +name = lsx_vclz_h +asm-fmts = vd, vj +data-types = V8HI, V8HI + +/// lsx_vclz_w +name = lsx_vclz_w +asm-fmts = vd, vj +data-types = V4SI, V4SI + +/// lsx_vclz_d +name = lsx_vclz_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vpickve2gr_b +name = lsx_vpickve2gr_b +asm-fmts = rd, vj, ui4 +data-types = SI, V16QI, UQI + +/// lsx_vpickve2gr_h +name = lsx_vpickve2gr_h +asm-fmts = rd, vj, ui3 +data-types = SI, V8HI, UQI + +/// lsx_vpickve2gr_w +name = lsx_vpickve2gr_w +asm-fmts = rd, vj, ui2 +data-types = SI, V4SI, UQI + +/// lsx_vpickve2gr_d +name = lsx_vpickve2gr_d +asm-fmts = rd, vj, ui1 +data-types = DI, V2DI, UQI + +/// lsx_vpickve2gr_bu +name = lsx_vpickve2gr_bu +asm-fmts = rd, vj, ui4 +data-types = USI, V16QI, UQI + +/// lsx_vpickve2gr_hu +name = lsx_vpickve2gr_hu +asm-fmts = rd, vj, ui3 +data-types = USI, V8HI, UQI + +/// lsx_vpickve2gr_wu +name = lsx_vpickve2gr_wu +asm-fmts = rd, vj, ui2 +data-types = USI, V4SI, UQI + +/// lsx_vpickve2gr_du +name = lsx_vpickve2gr_du +asm-fmts = rd, vj, ui1 +data-types = UDI, V2DI, UQI + +/// lsx_vinsgr2vr_b +name = lsx_vinsgr2vr_b +asm-fmts = vd, rj, ui4 +data-types = V16QI, V16QI, SI, UQI + +/// lsx_vinsgr2vr_h +name = lsx_vinsgr2vr_h +asm-fmts = vd, rj, ui3 +data-types = V8HI, V8HI, SI, UQI + 
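A reading aid for these spec entries: the first type in data-types is the result and the remaining types are the operands in asm-fmts order; a leading U marks an unsigned element type, and tokens such as ui3 or si5 give the width and signedness of an immediate. For example, the lsx_vinsgr2vr_h entry just above says that vd is both read and written (V8HI appears as result and as first operand), that rj is a general-purpose register (SI), and that the ui3 immediate is a lane index. A minimal scalar sketch of that behavior, with a hypothetical helper name (not the generated binding):

```rust
// Hypothetical scalar model of lsx_vinsgr2vr_h, for illustration only.
// data-types = V8HI, V8HI, SI, UQI: result and first operand are 8 x i16
// (vd is read-modify-write), the SI operand is a general-purpose register,
// and the ui3 immediate is a lane index in 0..=7.
fn vinsgr2vr_h_model(vd: [i16; 8], rj: i32, idx: u32) -> [i16; 8] {
    assert!(idx < 8); // ui3 range; the in-tree bindings are expected to check this at compile time
    let mut out = vd;
    out[idx as usize] = rj as i16; // the low 16 bits of the GPR replace lane `idx`
    out
}
```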
+/// lsx_vinsgr2vr_w +name = lsx_vinsgr2vr_w +asm-fmts = vd, rj, ui2 +data-types = V4SI, V4SI, SI, UQI + +/// lsx_vinsgr2vr_d +name = lsx_vinsgr2vr_d +asm-fmts = vd, rj, ui1 +data-types = V2DI, V2DI, DI, UQI + +/// lsx_vfadd_s +name = lsx_vfadd_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfadd_d +name = lsx_vfadd_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfsub_s +name = lsx_vfsub_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfsub_d +name = lsx_vfsub_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfmul_s +name = lsx_vfmul_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfmul_d +name = lsx_vfmul_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfdiv_s +name = lsx_vfdiv_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfdiv_d +name = lsx_vfdiv_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfcvt_h_s +name = lsx_vfcvt_h_s +asm-fmts = vd, vj, vk +data-types = V8HI, V4SF, V4SF + +/// lsx_vfcvt_s_d +name = lsx_vfcvt_s_d +asm-fmts = vd, vj, vk +data-types = V4SF, V2DF, V2DF + +/// lsx_vfmin_s +name = lsx_vfmin_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfmin_d +name = lsx_vfmin_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfmina_s +name = lsx_vfmina_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfmina_d +name = lsx_vfmina_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfmax_s +name = lsx_vfmax_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfmax_d +name = lsx_vfmax_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfmaxa_s +name = lsx_vfmaxa_s +asm-fmts = vd, vj, vk +data-types = V4SF, V4SF, V4SF + +/// lsx_vfmaxa_d +name = lsx_vfmaxa_d +asm-fmts = vd, vj, vk +data-types = V2DF, V2DF, V2DF + +/// lsx_vfclass_s +name = lsx_vfclass_s +asm-fmts = vd, vj +data-types = V4SI, V4SF + +/// lsx_vfclass_d +name = lsx_vfclass_d +asm-fmts = vd, vj +data-types = V2DI, V2DF + +/// lsx_vfsqrt_s +name = lsx_vfsqrt_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfsqrt_d +name = lsx_vfsqrt_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfrecip_s +name = lsx_vfrecip_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrecip_d +name = lsx_vfrecip_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfrint_s +name = lsx_vfrint_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrint_d +name = lsx_vfrint_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfrsqrt_s +name = lsx_vfrsqrt_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrsqrt_d +name = lsx_vfrsqrt_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vflogb_s +name = lsx_vflogb_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vflogb_d +name = lsx_vflogb_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfcvth_s_h +name = lsx_vfcvth_s_h +asm-fmts = vd, vj +data-types = V4SF, V8HI + +/// lsx_vfcvth_d_s +name = lsx_vfcvth_d_s +asm-fmts = vd, vj +data-types = V2DF, V4SF + +/// lsx_vfcvtl_s_h +name = lsx_vfcvtl_s_h +asm-fmts = vd, vj +data-types = V4SF, V8HI + +/// lsx_vfcvtl_d_s +name = lsx_vfcvtl_d_s +asm-fmts = vd, vj +data-types = V2DF, V4SF + +/// lsx_vftint_w_s +name = lsx_vftint_w_s +asm-fmts = vd, vj +data-types = V4SI, V4SF + +/// lsx_vftint_l_d +name = lsx_vftint_l_d +asm-fmts = vd, vj +data-types = V2DI, V2DF + +/// lsx_vftint_wu_s +name = lsx_vftint_wu_s +asm-fmts = vd, vj +data-types = UV4SI, 
V4SF + +/// lsx_vftint_lu_d +name = lsx_vftint_lu_d +asm-fmts = vd, vj +data-types = UV2DI, V2DF + +/// lsx_vftintrz_w_s +name = lsx_vftintrz_w_s +asm-fmts = vd, vj +data-types = V4SI, V4SF + +/// lsx_vftintrz_l_d +name = lsx_vftintrz_l_d +asm-fmts = vd, vj +data-types = V2DI, V2DF + +/// lsx_vftintrz_wu_s +name = lsx_vftintrz_wu_s +asm-fmts = vd, vj +data-types = UV4SI, V4SF + +/// lsx_vftintrz_lu_d +name = lsx_vftintrz_lu_d +asm-fmts = vd, vj +data-types = UV2DI, V2DF + +/// lsx_vffint_s_w +name = lsx_vffint_s_w +asm-fmts = vd, vj +data-types = V4SF, V4SI + +/// lsx_vffint_d_l +name = lsx_vffint_d_l +asm-fmts = vd, vj +data-types = V2DF, V2DI + +/// lsx_vffint_s_wu +name = lsx_vffint_s_wu +asm-fmts = vd, vj +data-types = V4SF, UV4SI + +/// lsx_vffint_d_lu +name = lsx_vffint_d_lu +asm-fmts = vd, vj +data-types = V2DF, UV2DI + +/// lsx_vandn_v +name = lsx_vandn_v +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vneg_b +name = lsx_vneg_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vneg_h +name = lsx_vneg_h +asm-fmts = vd, vj +data-types = V8HI, V8HI + +/// lsx_vneg_w +name = lsx_vneg_w +asm-fmts = vd, vj +data-types = V4SI, V4SI + +/// lsx_vneg_d +name = lsx_vneg_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vmuh_b +name = lsx_vmuh_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vmuh_h +name = lsx_vmuh_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vmuh_w +name = lsx_vmuh_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vmuh_d +name = lsx_vmuh_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmuh_bu +name = lsx_vmuh_bu +asm-fmts = vd, vj, vk +data-types = UV16QI, UV16QI, UV16QI + +/// lsx_vmuh_hu +name = lsx_vmuh_hu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV8HI + +/// lsx_vmuh_wu +name = lsx_vmuh_wu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV4SI + +/// lsx_vmuh_du +name = lsx_vmuh_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vsllwil_h_b +name = lsx_vsllwil_h_b +asm-fmts = vd, vj, ui3 +data-types = V8HI, V16QI, UQI + +/// lsx_vsllwil_w_h +name = lsx_vsllwil_w_h +asm-fmts = vd, vj, ui4 +data-types = V4SI, V8HI, UQI + +/// lsx_vsllwil_d_w +name = lsx_vsllwil_d_w +asm-fmts = vd, vj, ui5 +data-types = V2DI, V4SI, UQI + +/// lsx_vsllwil_hu_bu +name = lsx_vsllwil_hu_bu +asm-fmts = vd, vj, ui3 +data-types = UV8HI, UV16QI, UQI + +/// lsx_vsllwil_wu_hu +name = lsx_vsllwil_wu_hu +asm-fmts = vd, vj, ui4 +data-types = UV4SI, UV8HI, UQI + +/// lsx_vsllwil_du_wu +name = lsx_vsllwil_du_wu +asm-fmts = vd, vj, ui5 +data-types = UV2DI, UV4SI, UQI + +/// lsx_vsran_b_h +name = lsx_vsran_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vsran_h_w +name = lsx_vsran_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vsran_w_d +name = lsx_vsran_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssran_b_h +name = lsx_vssran_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vssran_h_w +name = lsx_vssran_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vssran_w_d +name = lsx_vssran_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssran_bu_h +name = lsx_vssran_bu_h +asm-fmts = vd, vj, vk +data-types = UV16QI, UV8HI, UV8HI + +/// lsx_vssran_hu_w +name = lsx_vssran_hu_w +asm-fmts = vd, vj, vk +data-types = UV8HI, UV4SI, UV4SI + +/// lsx_vssran_wu_d +name = lsx_vssran_wu_d +asm-fmts = vd, vj, vk +data-types = UV4SI, UV2DI, UV2DI + +/// 
lsx_vsrarn_b_h +name = lsx_vsrarn_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vsrarn_h_w +name = lsx_vsrarn_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vsrarn_w_d +name = lsx_vsrarn_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssrarn_b_h +name = lsx_vssrarn_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vssrarn_h_w +name = lsx_vssrarn_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vssrarn_w_d +name = lsx_vssrarn_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssrarn_bu_h +name = lsx_vssrarn_bu_h +asm-fmts = vd, vj, vk +data-types = UV16QI, UV8HI, UV8HI + +/// lsx_vssrarn_hu_w +name = lsx_vssrarn_hu_w +asm-fmts = vd, vj, vk +data-types = UV8HI, UV4SI, UV4SI + +/// lsx_vssrarn_wu_d +name = lsx_vssrarn_wu_d +asm-fmts = vd, vj, vk +data-types = UV4SI, UV2DI, UV2DI + +/// lsx_vsrln_b_h +name = lsx_vsrln_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vsrln_h_w +name = lsx_vsrln_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vsrln_w_d +name = lsx_vsrln_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssrln_bu_h +name = lsx_vssrln_bu_h +asm-fmts = vd, vj, vk +data-types = UV16QI, UV8HI, UV8HI + +/// lsx_vssrln_hu_w +name = lsx_vssrln_hu_w +asm-fmts = vd, vj, vk +data-types = UV8HI, UV4SI, UV4SI + +/// lsx_vssrln_wu_d +name = lsx_vssrln_wu_d +asm-fmts = vd, vj, vk +data-types = UV4SI, UV2DI, UV2DI + +/// lsx_vsrlrn_b_h +name = lsx_vsrlrn_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vsrlrn_h_w +name = lsx_vsrlrn_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vsrlrn_w_d +name = lsx_vsrlrn_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssrlrn_bu_h +name = lsx_vssrlrn_bu_h +asm-fmts = vd, vj, vk +data-types = UV16QI, UV8HI, UV8HI + +/// lsx_vssrlrn_hu_w +name = lsx_vssrlrn_hu_w +asm-fmts = vd, vj, vk +data-types = UV8HI, UV4SI, UV4SI + +/// lsx_vssrlrn_wu_d +name = lsx_vssrlrn_wu_d +asm-fmts = vd, vj, vk +data-types = UV4SI, UV2DI, UV2DI + +/// lsx_vfrstpi_b +name = lsx_vfrstpi_b +asm-fmts = vd, vj, ui5 +data-types = V16QI, V16QI, V16QI, UQI + +/// lsx_vfrstpi_h +name = lsx_vfrstpi_h +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, UQI + +/// lsx_vfrstp_b +name = lsx_vfrstp_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI, V16QI + +/// lsx_vfrstp_h +name = lsx_vfrstp_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI, V8HI + +/// lsx_vshuf4i_d +name = lsx_vshuf4i_d +asm-fmts = vd, vj, ui8 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vbsrl_v +name = lsx_vbsrl_v +asm-fmts = vd, vj, ui5 +data-types = V16QI, V16QI, UQI + +/// lsx_vbsll_v +name = lsx_vbsll_v +asm-fmts = vd, vj, ui5 +data-types = V16QI, V16QI, UQI + +/// lsx_vextrins_b +name = lsx_vextrins_b +asm-fmts = vd, vj, ui8 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vextrins_h +name = lsx_vextrins_h +asm-fmts = vd, vj, ui8 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vextrins_w +name = lsx_vextrins_w +asm-fmts = vd, vj, ui8 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vextrins_d +name = lsx_vextrins_d +asm-fmts = vd, vj, ui8 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vmskltz_b +name = lsx_vmskltz_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vmskltz_h +name = lsx_vmskltz_h +asm-fmts = vd, vj +data-types = V8HI, V8HI + +/// lsx_vmskltz_w +name = lsx_vmskltz_w +asm-fmts = vd, vj +data-types = V4SI, V4SI + +/// lsx_vmskltz_d 
+name = lsx_vmskltz_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vsigncov_b +name = lsx_vsigncov_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vsigncov_h +name = lsx_vsigncov_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vsigncov_w +name = lsx_vsigncov_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vsigncov_d +name = lsx_vsigncov_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vfmadd_s +name = lsx_vfmadd_s +asm-fmts = vd, vj, vk, va +data-types = V4SF, V4SF, V4SF, V4SF + +/// lsx_vfmadd_d +name = lsx_vfmadd_d +asm-fmts = vd, vj, vk, va +data-types = V2DF, V2DF, V2DF, V2DF + +/// lsx_vfmsub_s +name = lsx_vfmsub_s +asm-fmts = vd, vj, vk, va +data-types = V4SF, V4SF, V4SF, V4SF + +/// lsx_vfmsub_d +name = lsx_vfmsub_d +asm-fmts = vd, vj, vk, va +data-types = V2DF, V2DF, V2DF, V2DF + +/// lsx_vfnmadd_s +name = lsx_vfnmadd_s +asm-fmts = vd, vj, vk, va +data-types = V4SF, V4SF, V4SF, V4SF + +/// lsx_vfnmadd_d +name = lsx_vfnmadd_d +asm-fmts = vd, vj, vk, va +data-types = V2DF, V2DF, V2DF, V2DF + +/// lsx_vfnmsub_s +name = lsx_vfnmsub_s +asm-fmts = vd, vj, vk, va +data-types = V4SF, V4SF, V4SF, V4SF + +/// lsx_vfnmsub_d +name = lsx_vfnmsub_d +asm-fmts = vd, vj, vk, va +data-types = V2DF, V2DF, V2DF, V2DF + +/// lsx_vftintrne_w_s +name = lsx_vftintrne_w_s +asm-fmts = vd, vj +data-types = V4SI, V4SF + +/// lsx_vftintrne_l_d +name = lsx_vftintrne_l_d +asm-fmts = vd, vj +data-types = V2DI, V2DF + +/// lsx_vftintrp_w_s +name = lsx_vftintrp_w_s +asm-fmts = vd, vj +data-types = V4SI, V4SF + +/// lsx_vftintrp_l_d +name = lsx_vftintrp_l_d +asm-fmts = vd, vj +data-types = V2DI, V2DF + +/// lsx_vftintrm_w_s +name = lsx_vftintrm_w_s +asm-fmts = vd, vj +data-types = V4SI, V4SF + +/// lsx_vftintrm_l_d +name = lsx_vftintrm_l_d +asm-fmts = vd, vj +data-types = V2DI, V2DF + +/// lsx_vftint_w_d +name = lsx_vftint_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DF, V2DF + +/// lsx_vffint_s_l +name = lsx_vffint_s_l +asm-fmts = vd, vj, vk +data-types = V4SF, V2DI, V2DI + +/// lsx_vftintrz_w_d +name = lsx_vftintrz_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DF, V2DF + +/// lsx_vftintrp_w_d +name = lsx_vftintrp_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DF, V2DF + +/// lsx_vftintrm_w_d +name = lsx_vftintrm_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DF, V2DF + +/// lsx_vftintrne_w_d +name = lsx_vftintrne_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DF, V2DF + +/// lsx_vftintl_l_s +name = lsx_vftintl_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftinth_l_s +name = lsx_vftinth_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vffinth_d_w +name = lsx_vffinth_d_w +asm-fmts = vd, vj +data-types = V2DF, V4SI + +/// lsx_vffintl_d_w +name = lsx_vffintl_d_w +asm-fmts = vd, vj +data-types = V2DF, V4SI + +/// lsx_vftintrzl_l_s +name = lsx_vftintrzl_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrzh_l_s +name = lsx_vftintrzh_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrpl_l_s +name = lsx_vftintrpl_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrph_l_s +name = lsx_vftintrph_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrml_l_s +name = lsx_vftintrml_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrmh_l_s +name = lsx_vftintrmh_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrnel_l_s +name = lsx_vftintrnel_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vftintrneh_l_s +name = 
lsx_vftintrneh_l_s +asm-fmts = vd, vj +data-types = V2DI, V4SF + +/// lsx_vfrintrne_s +name = lsx_vfrintrne_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrintrne_d +name = lsx_vfrintrne_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfrintrz_s +name = lsx_vfrintrz_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrintrz_d +name = lsx_vfrintrz_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfrintrp_s +name = lsx_vfrintrp_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrintrp_d +name = lsx_vfrintrp_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vfrintrm_s +name = lsx_vfrintrm_s +asm-fmts = vd, vj +data-types = V4SF, V4SF + +/// lsx_vfrintrm_d +name = lsx_vfrintrm_d +asm-fmts = vd, vj +data-types = V2DF, V2DF + +/// lsx_vstelm_b +name = lsx_vstelm_b +asm-fmts = vd, rj, si8, idx +data-types = VOID, V16QI, CVPOINTER, SI, UQI + +/// lsx_vstelm_h +name = lsx_vstelm_h +asm-fmts = vd, rj, si8, idx +data-types = VOID, V8HI, CVPOINTER, SI, UQI + +/// lsx_vstelm_w +name = lsx_vstelm_w +asm-fmts = vd, rj, si8, idx +data-types = VOID, V4SI, CVPOINTER, SI, UQI + +/// lsx_vstelm_d +name = lsx_vstelm_d +asm-fmts = vd, rj, si8, idx +data-types = VOID, V2DI, CVPOINTER, SI, UQI + +/// lsx_vaddwev_d_w +name = lsx_vaddwev_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vaddwev_w_h +name = lsx_vaddwev_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vaddwev_h_b +name = lsx_vaddwev_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vaddwod_d_w +name = lsx_vaddwod_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vaddwod_w_h +name = lsx_vaddwod_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vaddwod_h_b +name = lsx_vaddwod_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vaddwev_d_wu +name = lsx_vaddwev_d_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vaddwev_w_hu +name = lsx_vaddwev_w_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vaddwev_h_bu +name = lsx_vaddwev_h_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vaddwod_d_wu +name = lsx_vaddwod_d_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vaddwod_w_hu +name = lsx_vaddwod_w_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vaddwod_h_bu +name = lsx_vaddwod_h_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vaddwev_d_wu_w +name = lsx_vaddwev_d_wu_w +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, V4SI + +/// lsx_vaddwev_w_hu_h +name = lsx_vaddwev_w_hu_h +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, V8HI + +/// lsx_vaddwev_h_bu_b +name = lsx_vaddwev_h_bu_b +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, V16QI + +/// lsx_vaddwod_d_wu_w +name = lsx_vaddwod_d_wu_w +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, V4SI + +/// lsx_vaddwod_w_hu_h +name = lsx_vaddwod_w_hu_h +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, V8HI + +/// lsx_vaddwod_h_bu_b +name = lsx_vaddwod_h_bu_b +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, V16QI + +/// lsx_vsubwev_d_w +name = lsx_vsubwev_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vsubwev_w_h +name = lsx_vsubwev_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vsubwev_h_b +name = lsx_vsubwev_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vsubwod_d_w +name = lsx_vsubwod_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// 
lsx_vsubwod_w_h +name = lsx_vsubwod_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vsubwod_h_b +name = lsx_vsubwod_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vsubwev_d_wu +name = lsx_vsubwev_d_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vsubwev_w_hu +name = lsx_vsubwev_w_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vsubwev_h_bu +name = lsx_vsubwev_h_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vsubwod_d_wu +name = lsx_vsubwod_d_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vsubwod_w_hu +name = lsx_vsubwod_w_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vsubwod_h_bu +name = lsx_vsubwod_h_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vaddwev_q_d +name = lsx_vaddwev_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vaddwod_q_d +name = lsx_vaddwod_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vaddwev_q_du +name = lsx_vaddwev_q_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vaddwod_q_du +name = lsx_vaddwod_q_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vsubwev_q_d +name = lsx_vsubwev_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsubwod_q_d +name = lsx_vsubwod_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsubwev_q_du +name = lsx_vsubwev_q_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vsubwod_q_du +name = lsx_vsubwod_q_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vaddwev_q_du_d +name = lsx_vaddwev_q_du_d +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, V2DI + +/// lsx_vaddwod_q_du_d +name = lsx_vaddwod_q_du_d +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, V2DI + +/// lsx_vmulwev_d_w +name = lsx_vmulwev_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vmulwev_w_h +name = lsx_vmulwev_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vmulwev_h_b +name = lsx_vmulwev_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vmulwod_d_w +name = lsx_vmulwod_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V4SI, V4SI + +/// lsx_vmulwod_w_h +name = lsx_vmulwod_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V8HI, V8HI + +/// lsx_vmulwod_h_b +name = lsx_vmulwod_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V16QI, V16QI + +/// lsx_vmulwev_d_wu +name = lsx_vmulwev_d_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vmulwev_w_hu +name = lsx_vmulwev_w_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vmulwev_h_bu +name = lsx_vmulwev_h_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vmulwod_d_wu +name = lsx_vmulwod_d_wu +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, UV4SI + +/// lsx_vmulwod_w_hu +name = lsx_vmulwod_w_hu +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, UV8HI + +/// lsx_vmulwod_h_bu +name = lsx_vmulwod_h_bu +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, UV16QI + +/// lsx_vmulwev_d_wu_w +name = lsx_vmulwev_d_wu_w +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, V4SI + +/// lsx_vmulwev_w_hu_h +name = lsx_vmulwev_w_hu_h +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, V8HI + +/// lsx_vmulwev_h_bu_b +name = lsx_vmulwev_h_bu_b +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, V16QI + +/// lsx_vmulwod_d_wu_w +name = lsx_vmulwod_d_wu_w +asm-fmts = vd, vj, vk +data-types = V2DI, UV4SI, V4SI + 
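In the vmulwev/vmulwod family the ev or od suffix selects the even- or odd-indexed source lanes and each product is widened to the next element size, which is why the result type is one step wider than the sources; the _wu_w, _hu_h and _bu_b variants pair an unsigned vj with a signed vk, as the UV/V mix in their data-types shows. A hedged scalar sketch of the h_b pair, assuming only the semantics just described:

```rust
// Hedged scalar model of lsx_vmulwev_h_b / lsx_vmulwod_h_b (illustration
// only): even- or odd-indexed i8 lanes are sign-extended to i16 and
// multiplied, turning two 16 x i8 inputs into one 8 x i16 result.
fn mulw_h_b(a: [i8; 16], b: [i8; 16], odd: bool) -> [i16; 8] {
    let off = usize::from(odd); // 0 selects even lanes, 1 selects odd lanes
    core::array::from_fn(|i| i16::from(a[2 * i + off]) * i16::from(b[2 * i + off]))
}
```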
+/// lsx_vmulwod_w_hu_h +name = lsx_vmulwod_w_hu_h +asm-fmts = vd, vj, vk +data-types = V4SI, UV8HI, V8HI + +/// lsx_vmulwod_h_bu_b +name = lsx_vmulwod_h_bu_b +asm-fmts = vd, vj, vk +data-types = V8HI, UV16QI, V16QI + +/// lsx_vmulwev_q_d +name = lsx_vmulwev_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmulwod_q_d +name = lsx_vmulwod_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vmulwev_q_du +name = lsx_vmulwev_q_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vmulwod_q_du +name = lsx_vmulwod_q_du +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, UV2DI + +/// lsx_vmulwev_q_du_d +name = lsx_vmulwev_q_du_d +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, V2DI + +/// lsx_vmulwod_q_du_d +name = lsx_vmulwod_q_du_d +asm-fmts = vd, vj, vk +data-types = V2DI, UV2DI, V2DI + +/// lsx_vhaddw_q_d +name = lsx_vhaddw_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vhaddw_qu_du +name = lsx_vhaddw_qu_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vhsubw_q_d +name = lsx_vhsubw_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vhsubw_qu_du +name = lsx_vhsubw_qu_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI + +/// lsx_vmaddwev_d_w +name = lsx_vmaddwev_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V4SI, V4SI + +/// lsx_vmaddwev_w_h +name = lsx_vmaddwev_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V8HI, V8HI + +/// lsx_vmaddwev_h_b +name = lsx_vmaddwev_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V16QI, V16QI + +/// lsx_vmaddwev_d_wu +name = lsx_vmaddwev_d_wu +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV4SI, UV4SI + +/// lsx_vmaddwev_w_hu +name = lsx_vmaddwev_w_hu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV8HI, UV8HI + +/// lsx_vmaddwev_h_bu +name = lsx_vmaddwev_h_bu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV16QI, UV16QI + +/// lsx_vmaddwod_d_w +name = lsx_vmaddwod_d_w +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V4SI, V4SI + +/// lsx_vmaddwod_w_h +name = lsx_vmaddwod_w_h +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V8HI, V8HI + +/// lsx_vmaddwod_h_b +name = lsx_vmaddwod_h_b +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V16QI, V16QI + +/// lsx_vmaddwod_d_wu +name = lsx_vmaddwod_d_wu +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV4SI, UV4SI + +/// lsx_vmaddwod_w_hu +name = lsx_vmaddwod_w_hu +asm-fmts = vd, vj, vk +data-types = UV4SI, UV4SI, UV8HI, UV8HI + +/// lsx_vmaddwod_h_bu +name = lsx_vmaddwod_h_bu +asm-fmts = vd, vj, vk +data-types = UV8HI, UV8HI, UV16QI, UV16QI + +/// lsx_vmaddwev_d_wu_w +name = lsx_vmaddwev_d_wu_w +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, UV4SI, V4SI + +/// lsx_vmaddwev_w_hu_h +name = lsx_vmaddwev_w_hu_h +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, UV8HI, V8HI + +/// lsx_vmaddwev_h_bu_b +name = lsx_vmaddwev_h_bu_b +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, UV16QI, V16QI + +/// lsx_vmaddwod_d_wu_w +name = lsx_vmaddwod_d_wu_w +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, UV4SI, V4SI + +/// lsx_vmaddwod_w_hu_h +name = lsx_vmaddwod_w_hu_h +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, UV8HI, V8HI + +/// lsx_vmaddwod_h_bu_b +name = lsx_vmaddwod_h_bu_b +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, UV16QI, V16QI + +/// lsx_vmaddwev_q_d +name = lsx_vmaddwev_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI, V2DI + +/// lsx_vmaddwod_q_d +name = lsx_vmaddwod_q_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI, V2DI + +/// lsx_vmaddwev_q_du +name 
= lsx_vmaddwev_q_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI, UV2DI + +/// lsx_vmaddwod_q_du +name = lsx_vmaddwod_q_du +asm-fmts = vd, vj, vk +data-types = UV2DI, UV2DI, UV2DI, UV2DI + +/// lsx_vmaddwev_q_du_d +name = lsx_vmaddwev_q_du_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, UV2DI, V2DI + +/// lsx_vmaddwod_q_du_d +name = lsx_vmaddwod_q_du_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, UV2DI, V2DI + +/// lsx_vrotr_b +name = lsx_vrotr_b +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vrotr_h +name = lsx_vrotr_h +asm-fmts = vd, vj, vk +data-types = V8HI, V8HI, V8HI + +/// lsx_vrotr_w +name = lsx_vrotr_w +asm-fmts = vd, vj, vk +data-types = V4SI, V4SI, V4SI + +/// lsx_vrotr_d +name = lsx_vrotr_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vadd_q +name = lsx_vadd_q +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vsub_q +name = lsx_vsub_q +asm-fmts = vd, vj, vk +data-types = V2DI, V2DI, V2DI + +/// lsx_vldrepl_b +name = lsx_vldrepl_b +asm-fmts = vd, rj, si12 +data-types = V16QI, CVPOINTER, SI + +/// lsx_vldrepl_h +name = lsx_vldrepl_h +asm-fmts = vd, rj, si11 +data-types = V8HI, CVPOINTER, SI + +/// lsx_vldrepl_w +name = lsx_vldrepl_w +asm-fmts = vd, rj, si10 +data-types = V4SI, CVPOINTER, SI + +/// lsx_vldrepl_d +name = lsx_vldrepl_d +asm-fmts = vd, rj, si9 +data-types = V2DI, CVPOINTER, SI + +/// lsx_vmskgez_b +name = lsx_vmskgez_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vmsknz_b +name = lsx_vmsknz_b +asm-fmts = vd, vj +data-types = V16QI, V16QI + +/// lsx_vexth_h_b +name = lsx_vexth_h_b +asm-fmts = vd, vj +data-types = V8HI, V16QI + +/// lsx_vexth_w_h +name = lsx_vexth_w_h +asm-fmts = vd, vj +data-types = V4SI, V8HI + +/// lsx_vexth_d_w +name = lsx_vexth_d_w +asm-fmts = vd, vj +data-types = V2DI, V4SI + +/// lsx_vexth_q_d +name = lsx_vexth_q_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vexth_hu_bu +name = lsx_vexth_hu_bu +asm-fmts = vd, vj +data-types = UV8HI, UV16QI + +/// lsx_vexth_wu_hu +name = lsx_vexth_wu_hu +asm-fmts = vd, vj +data-types = UV4SI, UV8HI + +/// lsx_vexth_du_wu +name = lsx_vexth_du_wu +asm-fmts = vd, vj +data-types = UV2DI, UV4SI + +/// lsx_vexth_qu_du +name = lsx_vexth_qu_du +asm-fmts = vd, vj +data-types = UV2DI, UV2DI + +/// lsx_vrotri_b +name = lsx_vrotri_b +asm-fmts = vd, vj, ui3 +data-types = V16QI, V16QI, UQI + +/// lsx_vrotri_h +name = lsx_vrotri_h +asm-fmts = vd, vj, ui4 +data-types = V8HI, V8HI, UQI + +/// lsx_vrotri_w +name = lsx_vrotri_w +asm-fmts = vd, vj, ui5 +data-types = V4SI, V4SI, UQI + +/// lsx_vrotri_d +name = lsx_vrotri_d +asm-fmts = vd, vj, ui6 +data-types = V2DI, V2DI, UQI + +/// lsx_vextl_q_d +name = lsx_vextl_q_d +asm-fmts = vd, vj +data-types = V2DI, V2DI + +/// lsx_vsrlni_b_h +name = lsx_vsrlni_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vsrlni_h_w +name = lsx_vsrlni_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vsrlni_w_d +name = lsx_vsrlni_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vsrlni_d_q +name = lsx_vsrlni_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vsrlrni_b_h +name = lsx_vsrlrni_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vsrlrni_h_w +name = lsx_vsrlrni_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vsrlrni_w_d +name = lsx_vsrlrni_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vsrlrni_d_q +name = 
lsx_vsrlrni_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vssrlni_b_h +name = lsx_vssrlni_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vssrlni_h_w +name = lsx_vssrlni_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vssrlni_w_d +name = lsx_vssrlni_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vssrlni_d_q +name = lsx_vssrlni_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vssrlni_bu_h +name = lsx_vssrlni_bu_h +asm-fmts = vd, vj, ui4 +data-types = UV16QI, UV16QI, V16QI, USI + +/// lsx_vssrlni_hu_w +name = lsx_vssrlni_hu_w +asm-fmts = vd, vj, ui5 +data-types = UV8HI, UV8HI, V8HI, USI + +/// lsx_vssrlni_wu_d +name = lsx_vssrlni_wu_d +asm-fmts = vd, vj, ui6 +data-types = UV4SI, UV4SI, V4SI, USI + +/// lsx_vssrlni_du_q +name = lsx_vssrlni_du_q +asm-fmts = vd, vj, ui7 +data-types = UV2DI, UV2DI, V2DI, USI + +/// lsx_vssrlrni_b_h +name = lsx_vssrlrni_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vssrlrni_h_w +name = lsx_vssrlrni_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vssrlrni_w_d +name = lsx_vssrlrni_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vssrlrni_d_q +name = lsx_vssrlrni_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vssrlrni_bu_h +name = lsx_vssrlrni_bu_h +asm-fmts = vd, vj, ui4 +data-types = UV16QI, UV16QI, V16QI, USI + +/// lsx_vssrlrni_hu_w +name = lsx_vssrlrni_hu_w +asm-fmts = vd, vj, ui5 +data-types = UV8HI, UV8HI, V8HI, USI + +/// lsx_vssrlrni_wu_d +name = lsx_vssrlrni_wu_d +asm-fmts = vd, vj, ui6 +data-types = UV4SI, UV4SI, V4SI, USI + +/// lsx_vssrlrni_du_q +name = lsx_vssrlrni_du_q +asm-fmts = vd, vj, ui7 +data-types = UV2DI, UV2DI, V2DI, USI + +/// lsx_vsrani_b_h +name = lsx_vsrani_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vsrani_h_w +name = lsx_vsrani_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vsrani_w_d +name = lsx_vsrani_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vsrani_d_q +name = lsx_vsrani_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vsrarni_b_h +name = lsx_vsrarni_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vsrarni_h_w +name = lsx_vsrarni_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vsrarni_w_d +name = lsx_vsrarni_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vsrarni_d_q +name = lsx_vsrarni_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vssrani_b_h +name = lsx_vssrani_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vssrani_h_w +name = lsx_vssrani_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vssrani_w_d +name = lsx_vssrani_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vssrani_d_q +name = lsx_vssrani_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vssrani_bu_h +name = lsx_vssrani_bu_h +asm-fmts = vd, vj, ui4 +data-types = UV16QI, UV16QI, V16QI, USI + +/// lsx_vssrani_hu_w +name = lsx_vssrani_hu_w +asm-fmts = vd, vj, ui5 +data-types = UV8HI, UV8HI, V8HI, USI + +/// lsx_vssrani_wu_d +name = lsx_vssrani_wu_d +asm-fmts = vd, vj, ui6 +data-types = UV4SI, UV4SI, V4SI, USI + +/// lsx_vssrani_du_q +name = lsx_vssrani_du_q +asm-fmts = vd, vj, ui7 +data-types 
= UV2DI, UV2DI, V2DI, USI + +/// lsx_vssrarni_b_h +name = lsx_vssrarni_b_h +asm-fmts = vd, vj, ui4 +data-types = V16QI, V16QI, V16QI, USI + +/// lsx_vssrarni_h_w +name = lsx_vssrarni_h_w +asm-fmts = vd, vj, ui5 +data-types = V8HI, V8HI, V8HI, USI + +/// lsx_vssrarni_w_d +name = lsx_vssrarni_w_d +asm-fmts = vd, vj, ui6 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vssrarni_d_q +name = lsx_vssrarni_d_q +asm-fmts = vd, vj, ui7 +data-types = V2DI, V2DI, V2DI, USI + +/// lsx_vssrarni_bu_h +name = lsx_vssrarni_bu_h +asm-fmts = vd, vj, ui4 +data-types = UV16QI, UV16QI, V16QI, USI + +/// lsx_vssrarni_hu_w +name = lsx_vssrarni_hu_w +asm-fmts = vd, vj, ui5 +data-types = UV8HI, UV8HI, V8HI, USI + +/// lsx_vssrarni_wu_d +name = lsx_vssrarni_wu_d +asm-fmts = vd, vj, ui6 +data-types = UV4SI, UV4SI, V4SI, USI + +/// lsx_vssrarni_du_q +name = lsx_vssrarni_du_q +asm-fmts = vd, vj, ui7 +data-types = UV2DI, UV2DI, V2DI, USI + +/// lsx_vpermi_w +name = lsx_vpermi_w +asm-fmts = vd, vj, ui8 +data-types = V4SI, V4SI, V4SI, USI + +/// lsx_vld +name = lsx_vld +asm-fmts = vd, rj, si12 +data-types = V16QI, CVPOINTER, SI + +/// lsx_vst +name = lsx_vst +asm-fmts = vd, rj, si12 +data-types = VOID, V16QI, CVPOINTER, SI + +/// lsx_vssrlrn_b_h +name = lsx_vssrlrn_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vssrlrn_h_w +name = lsx_vssrlrn_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vssrlrn_w_d +name = lsx_vssrlrn_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vssrln_b_h +name = lsx_vssrln_b_h +asm-fmts = vd, vj, vk +data-types = V16QI, V8HI, V8HI + +/// lsx_vssrln_h_w +name = lsx_vssrln_h_w +asm-fmts = vd, vj, vk +data-types = V8HI, V4SI, V4SI + +/// lsx_vssrln_w_d +name = lsx_vssrln_w_d +asm-fmts = vd, vj, vk +data-types = V4SI, V2DI, V2DI + +/// lsx_vorn_v +name = lsx_vorn_v +asm-fmts = vd, vj, vk +data-types = V16QI, V16QI, V16QI + +/// lsx_vldi +name = lsx_vldi +asm-fmts = vd, i13 +data-types = V2DI, HI + +/// lsx_vshuf_b +name = lsx_vshuf_b +asm-fmts = vd, vj, vk, va +data-types = V16QI, V16QI, V16QI, V16QI + +/// lsx_vldx +name = lsx_vldx +asm-fmts = vd, rj, rk +data-types = V16QI, CVPOINTER, DI + +/// lsx_vstx +name = lsx_vstx +asm-fmts = vd, rj, rk +data-types = VOID, V16QI, CVPOINTER, DI + +/// lsx_vextl_qu_du +name = lsx_vextl_qu_du +asm-fmts = vd, vj +data-types = UV2DI, UV2DI + +/// lsx_bnz_b +name = lsx_bnz_b +asm-fmts = cd, vj +data-types = SI, UV16QI + +/// lsx_bnz_d +name = lsx_bnz_d +asm-fmts = cd, vj +data-types = SI, UV2DI + +/// lsx_bnz_h +name = lsx_bnz_h +asm-fmts = cd, vj +data-types = SI, UV8HI + +/// lsx_bnz_v +name = lsx_bnz_v +asm-fmts = cd, vj +data-types = SI, UV16QI + +/// lsx_bnz_w +name = lsx_bnz_w +asm-fmts = cd, vj +data-types = SI, UV4SI + +/// lsx_bz_b +name = lsx_bz_b +asm-fmts = cd, vj +data-types = SI, UV16QI + +/// lsx_bz_d +name = lsx_bz_d +asm-fmts = cd, vj +data-types = SI, UV2DI + +/// lsx_bz_h +name = lsx_bz_h +asm-fmts = cd, vj +data-types = SI, UV8HI + +/// lsx_bz_v +name = lsx_bz_v +asm-fmts = cd, vj +data-types = SI, UV16QI + +/// lsx_bz_w +name = lsx_bz_w +asm-fmts = cd, vj +data-types = SI, UV4SI + +/// lsx_vfcmp_caf_d +name = lsx_vfcmp_caf_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_caf_s +name = lsx_vfcmp_caf_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_ceq_d +name = lsx_vfcmp_ceq_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_ceq_s +name = lsx_vfcmp_ceq_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// 
lsx_vfcmp_cle_d +name = lsx_vfcmp_cle_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cle_s +name = lsx_vfcmp_cle_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_clt_d +name = lsx_vfcmp_clt_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_clt_s +name = lsx_vfcmp_clt_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cne_d +name = lsx_vfcmp_cne_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cne_s +name = lsx_vfcmp_cne_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cor_d +name = lsx_vfcmp_cor_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cor_s +name = lsx_vfcmp_cor_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cueq_d +name = lsx_vfcmp_cueq_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cueq_s +name = lsx_vfcmp_cueq_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cule_d +name = lsx_vfcmp_cule_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cule_s +name = lsx_vfcmp_cule_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cult_d +name = lsx_vfcmp_cult_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cult_s +name = lsx_vfcmp_cult_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cun_d +name = lsx_vfcmp_cun_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cune_d +name = lsx_vfcmp_cune_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_cune_s +name = lsx_vfcmp_cune_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_cun_s +name = lsx_vfcmp_cun_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_saf_d +name = lsx_vfcmp_saf_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_saf_s +name = lsx_vfcmp_saf_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_seq_d +name = lsx_vfcmp_seq_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_seq_s +name = lsx_vfcmp_seq_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sle_d +name = lsx_vfcmp_sle_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sle_s +name = lsx_vfcmp_sle_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_slt_d +name = lsx_vfcmp_slt_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_slt_s +name = lsx_vfcmp_slt_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sne_d +name = lsx_vfcmp_sne_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sne_s +name = lsx_vfcmp_sne_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sor_d +name = lsx_vfcmp_sor_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sor_s +name = lsx_vfcmp_sor_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sueq_d +name = lsx_vfcmp_sueq_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sueq_s +name = lsx_vfcmp_sueq_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sule_d +name = lsx_vfcmp_sule_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sule_s +name = lsx_vfcmp_sule_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sult_d +name = lsx_vfcmp_sult_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF 
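Each vfcmp entry returns an integer vector (V2DI for _d, V4SI for _s) of per-lane masks rather than a condition flag: a lane is all ones when the condition holds and zero otherwise. The c-prefixed conditions are quiet and the s-prefixed ones signaling on NaN operands, and conditions containing u, such as cult, also hold when the operands compare unordered. A hedged per-lane sketch of the quiet clt condition under those assumptions, with a hypothetical helper name:

```rust
// Hedged per-lane model of the quiet `clt` condition (not the generated
// code): all ones when x < y, zero otherwise, including the unordered
// (NaN) case, which `clt` treats as false and `cult` would treat as true.
fn fcmp_clt_lane(x: f64, y: f64) -> i64 {
    if x < y { !0 } else { 0 }
}
```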
+ +/// lsx_vfcmp_sult_s +name = lsx_vfcmp_sult_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sun_d +name = lsx_vfcmp_sun_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sune_d +name = lsx_vfcmp_sune_d +asm-fmts = vd, vj, vk +data-types = V2DI, V2DF, V2DF + +/// lsx_vfcmp_sune_s +name = lsx_vfcmp_sune_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vfcmp_sun_s +name = lsx_vfcmp_sun_s +asm-fmts = vd, vj, vk +data-types = V4SI, V4SF, V4SF + +/// lsx_vrepli_b +name = lsx_vrepli_b +asm-fmts = vd, si10 +data-types = V16QI, HI + +/// lsx_vrepli_d +name = lsx_vrepli_d +asm-fmts = vd, si10 +data-types = V2DI, HI + +/// lsx_vrepli_h +name = lsx_vrepli_h +asm-fmts = vd, si10 +data-types = V8HI, HI + +/// lsx_vrepli_w +name = lsx_vrepli_w +asm-fmts = vd, si10 +data-types = V4SI, HI + diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h b/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h new file mode 100644 index 000000000000..8fc826592c74 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h @@ -0,0 +1,5185 @@ +/* + * https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/loongarch/lsxintrin.h;hb=4912418dc1b51d49aca5982c6a2061bb912b92b7 + */ + +/* LARCH Loongson SX intrinsics include file. + + Copyright (C) 2018 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>.
*/ + +#ifndef _GCC_LOONGSON_SXINTRIN_H +#define _GCC_LOONGSON_SXINTRIN_H 1 + +#if defined(__loongarch_sx) +typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); +typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); +typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); +typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); +typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); +typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); +typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); +typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); +typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); +typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); +typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); +typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); +typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); +typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8))); +typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); +typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); +typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); +typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); +typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); +typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); + +typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); +typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); +typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsll_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsll_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsll_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsll_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsll_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsll_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsll_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsll_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vslli_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vslli_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. 
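A pattern worth noting in this header, starting with the shifts above: intrinsics whose last operand is a vector (`vsll_b` and friends) are ordinary inline functions, while the immediate forms (`vslli_b` with its `ui3` operand) are macros, so that the argument reaches `__builtin_lsx_*` as a compile-time constant. A small sketch under those assumptions (loongarch64, `-mlsx`); the per-lane count being taken modulo the lane width is my reading of the vector form, not something the header spells out:

```c
#include <lsxintrin.h>

// Vector form: each byte of v is shifted by the count in the matching
// byte of counts (presumably mod 8).
__m128i shl_per_lane(__m128i v, __m128i counts)
{
    return __lsx_vsll_b(v, counts);
}

// Immediate form: the macro needs a literal constant in 0..7 (ui3).
__m128i shl_by_3(__m128i v)
{
    return __lsx_vslli_b(v, 3);
}
```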
*/ +#define __lsx_vslli_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vslli_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vslli_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslli_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vslli_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vslli_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsra_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsra_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsra_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsra_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsra_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsra_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsra_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsra_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vsrai_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsrai_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vsrai_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsrai_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vsrai_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsrai_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vsrai_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vsrai_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrar_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrar_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrar_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrar_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrar_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrar_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrar_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrar_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vsrari_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsrari_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vsrari_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsrari_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vsrari_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsrari_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vsrari_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vsrari_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrl_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrl_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrl_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrl_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrl_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrl_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrl_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrl_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vsrli_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsrli_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vsrli_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsrli_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vsrli_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsrli_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. 
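The block above pairs each right shift in two flavors: `vsra*` is the arithmetic (sign-propagating) shift and `vsrl*` the logical (zero-filling) one, and the extra `r` in `vsrar`/`vsrari` (and `vsrlr`/`vsrlri` below) appears to select a rounding shift, conceptually adding `1 << (count - 1)` before shifting instead of truncating. A sketch under that reading:

```c
#include <lsxintrin.h>

// Halve each signed 16-bit lane, truncating vs. rounding to nearest.
// e.g. 5 -> 2 vs. 3, and -5 -> -3 vs. -2 (assuming the rounding form
// adds 1 << (count - 1) before the arithmetic shift).
__m128i halve_trunc(__m128i v) { return __lsx_vsrai_h(v, 1); }
__m128i halve_round(__m128i v) { return __lsx_vsrari_h(v, 1); }
```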
*/ +#define __lsx_vsrli_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vsrli_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlr_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlr_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlr_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlr_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlr_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlr_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlr_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlr_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vsrlri_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsrlri_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vsrlri_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsrlri_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vsrlri_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsrlri_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vsrlri_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vsrlri_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitclr_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitclr_b ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitclr_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitclr_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitclr_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitclr_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitclr_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitclr_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vbitclri_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vbitclri_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ +#define __lsx_vbitclri_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vbitclri_h ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ +#define __lsx_vbitclri_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vbitclri_w ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ +#define __lsx_vbitclri_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vbitclri_d ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitset_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitset_b ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitset_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitset_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitset_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitset_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitset_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitset_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vbitseti_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vbitseti_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ +#define __lsx_vbitseti_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vbitseti_h ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ +#define __lsx_vbitseti_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vbitseti_w ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ +#define __lsx_vbitseti_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vbitseti_d ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
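`vbitclr`, `vbitset` and (next) `vbitrev` form a per-lane bit-manipulation trio: clear, set, or flip a single bit in every element, with the bit index supplied either by the matching lane of the second vector or by an immediate. That the index is taken modulo the lane width is an assumption based on the UV16QI/UQI templates, not stated here. A sketch:

```c
#include <lsxintrin.h>

// Set the sign bit of every byte, then clear bit 0 of every byte.
__m128i set_msb_per_byte(__m128i v)   { return __lsx_vbitseti_b(v, 7); }
__m128i clear_lsb_per_byte(__m128i v) { return __lsx_vbitclri_b(v, 0); }
```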
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitrev_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitrev_b ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitrev_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitrev_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitrev_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitrev_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitrev_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vbitrev_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vbitrevi_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vbitrevi_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ +#define __lsx_vbitrevi_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vbitrevi_h ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ +#define __lsx_vbitrevi_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vbitrevi_w ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ +#define __lsx_vbitrevi_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vbitrevi_d ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadd_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadd_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadd_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadd_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadd_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadd_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadd_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadd_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. 
*/ +#define __lsx_vaddi_bu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vaddi_bu ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vaddi_hu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vaddi_hu ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vaddi_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vaddi_wu ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vaddi_du(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vaddi_du ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsub_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsub_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsub_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsub_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsub_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsub_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsub_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsub_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vsubi_bu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsubi_bu ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vsubi_hu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsubi_hu ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vsubi_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsubi_wu ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vsubi_du(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsubi_du ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. 
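One naming detail in the immediate add/subtract forms above: the `u` in `vaddi_bu` .. `vsubi_du` describes the `ui5` immediate, not the element type; the elements keep their b/h/w/d width, as the `V16QI, V16QI, UQI` style templates record. A sketch:

```c
#include <lsxintrin.h>

// Add the constant 16 to every byte lane (ui5 immediates cover 0..31).
__m128i bias_bytes(__m128i v) { return __lsx_vaddi_bu(v, 16); }
```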
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V16QI, V16QI, QI. */ +#define __lsx_vmaxi_b(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V8HI, V8HI, QI. */ +#define __lsx_vmaxi_h(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V4SI, V4SI, QI. */ +#define __lsx_vmaxi_w(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V2DI, V2DI, QI. */ +#define __lsx_vmaxi_d(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmax_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmax_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vmaxi_bu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_bu ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ +#define __lsx_vmaxi_hu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_hu ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV4SI, UV4SI, UQI. 
*/ +#define __lsx_vmaxi_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_wu ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ +#define __lsx_vmaxi_du(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmaxi_du ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V16QI, V16QI, QI. */ +#define __lsx_vmini_b(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V8HI, V8HI, QI. */ +#define __lsx_vmini_h(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V4SI, V4SI, QI. */ +#define __lsx_vmini_w(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V2DI, V2DI, QI. */ +#define __lsx_vmini_d(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmin_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmin_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vmini_bu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_bu ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ +#define __lsx_vmini_hu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_hu ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ +#define __lsx_vmini_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_wu ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ +#define __lsx_vmini_du(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vmini_du ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vseq_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vseq_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vseq_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vseq_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vseq_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vseq_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vseq_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vseq_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V16QI, V16QI, QI. */ +#define __lsx_vseqi_b(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vseqi_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V8HI, V8HI, QI. */ +#define __lsx_vseqi_h(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vseqi_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V4SI, V4SI, QI. */ +#define __lsx_vseqi_w(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vseqi_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V2DI, V2DI, QI. */ +#define __lsx_vseqi_d(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vseqi_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V16QI, V16QI, QI. */ +#define __lsx_vslti_b(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. 
*/ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V8HI, V8HI, QI. */ +#define __lsx_vslti_h(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V4SI, V4SI, QI. */ +#define __lsx_vslti_w(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V2DI, V2DI, QI. */ +#define __lsx_vslti_d(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vslt_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vslt_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, UV16QI, UQI. */ +#define __lsx_vslti_bu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_bu ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, UV8HI, UQI. 
*/ +#define __lsx_vslti_hu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_hu ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, UV4SI, UQI. */ +#define __lsx_vslti_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_wu ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V2DI, UV2DI, UQI. */ +#define __lsx_vslti_du(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslti_du ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V16QI, V16QI, QI. */ +#define __lsx_vslei_b(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V8HI, V8HI, QI. */ +#define __lsx_vslei_h(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V4SI, V4SI, QI. */ +#define __lsx_vslei_w(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, si5. */ +/* Data types in instruction templates: V2DI, V2DI, QI. */ +#define __lsx_vslei_d(/*__m128i*/ _1, /*si5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV4SI, UV4SI. 
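Like the float `vfcmp` family, these integer comparisons (`vseq`, `vslt`, `vsle` and their variants) produce full-lane masks, presumably all-ones for true and all-zeros for false given the integer result templates. Note the immediate encodings: signed compares take an `si5` immediate (-16..15, type QI in the templates), unsigned ones a `ui5` immediate (0..31, type UQI). A sketch:

```c
#include <lsxintrin.h>

// Mask of the byte lanes that are strictly negative (si5 immediate 0).
__m128i negative_bytes(__m128i v) { return __lsx_vslti_b(v, 0); }
```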
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsle_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsle_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, UV16QI, UQI. */ +#define __lsx_vslei_bu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_bu ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, UV8HI, UQI. */ +#define __lsx_vslei_hu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_hu ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, UV4SI, UQI. */ +#define __lsx_vslei_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_wu ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V2DI, UV2DI, UQI. */ +#define __lsx_vslei_du(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vslei_du ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vsat_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsat_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vsat_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsat_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vsat_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsat_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vsat_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vsat_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vsat_bu(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsat_bu ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ +#define __lsx_vsat_hu(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsat_hu ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ +#define __lsx_vsat_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsat_wu ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ +#define __lsx_vsat_du(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vsat_du ((v2u64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadda_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadda_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
*/ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadda_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadda_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadda_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadda_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadda_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadda_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsadd_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsadd_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. 
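Reading the mnemonics above in their usual sense (an assumption; this header gives only the type templates): `vadda` adds absolute values lane-wise, and `vsadd` is the saturating add, clamping to the element type's range instead of wrapping, with the `_bu` .. `_du` forms saturating as unsigned. A sketch contrasting it with the wrapping `vadd`:

```c
#include <lsxintrin.h>

// For byte lanes holding 120 and 10: wrapping add gives -126, while
// the saturating add presumably clamps to 127.
__m128i sum_wrap(__m128i a, __m128i b) { return __lsx_vadd_b(a, b); }
__m128i sum_sat(__m128i a, __m128i b)  { return __lsx_vsadd_b(a, b); }
```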
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavg_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavg_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. 
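`vavg` computes the lane-wise average, presumably `(a + b) >> 1` evaluated without intermediate overflow, and the `r` in `vavgr` again marks the rounding form, conceptually `(a + b + 1) >> 1`, consistent with `vsrar` earlier. A sketch:

```c
#include <lsxintrin.h>

// For byte lanes holding 3 and 4: floor average gives 3, the rounding
// form gives 4 (under the (a + b + 1) >> 1 reading).
__m128i avg_floor(__m128i a, __m128i b) { return __lsx_vavg_b(a, b); }
__m128i avg_round(__m128i a, __m128i b) { return __lsx_vavgr_b(a, b); }
```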
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vavgr_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vavgr_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssub_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssub_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vabsd_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vabsd_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmul_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmul_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. 
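/* Sketch: for unsigned lanes, vssub clamps negative differences to zero,
   so OR-ing the two one-sided subtractions equals vabsd; vabsd does it in
   one instruction.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i abs_diff_u8 (__m128i a, __m128i b)
{
  /* same value as __lsx_vor_v (__lsx_vssub_bu (a, b), __lsx_vssub_bu (b, a)) */
  return __lsx_vabsd_bu (a, b);
}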
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmul_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmul_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmul_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmul_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmul_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmul_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmadd_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmadd_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmadd_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmadd_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmadd_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmadd_w ((v4i32)_1, (v4i32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmadd_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmadd_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmsub_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmsub_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmsub_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmsub_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmsub_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmsub_w ((v4i32)_1, (v4i32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. 
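/* Sketch: the fused forms take the accumulator as the first operand
   (vd = vd +/- vj * vk), so a multiply-accumulate loop threads the
   accumulator through vmadd.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i mac_step (__m128i acc, __m128i x, __m128i y)
{
  /* acc[i] += x[i] * y[i], keeping the low 32 bits of each product */
  return __lsx_vmadd_w (acc, x, y);
}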
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmsub_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmsub_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vdiv_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vdiv_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_hu_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_hu_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_wu_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_wu_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_du_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_du_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_hu_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_hu_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_wu_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_wu_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_du_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_du_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
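/* Sketch: vhaddw adds the odd-indexed (high) elements of its first operand
   to the even-indexed (low) elements of its second, widening the result;
   passing the same vector twice therefore gives a pairwise widening sum.
   (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i pairwise_widen_sum_i8 (__m128i v)
{
  /* h[i] = (int16_t) v.b[2*i + 1] + (int16_t) v.b[2*i] */
  return __lsx_vhaddw_h_b (v, v);
}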
*/ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmod_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmod_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, rk. */ +/* Data types in instruction templates: V16QI, V16QI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplve_b (__m128i _1, int _2) +{ + return (__m128i)__builtin_lsx_vreplve_b ((v16i8)_1, (int)_2); +} + +/* Assembly instruction format: vd, vj, rk. */ +/* Data types in instruction templates: V8HI, V8HI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplve_h (__m128i _1, int _2) +{ + return (__m128i)__builtin_lsx_vreplve_h ((v8i16)_1, (int)_2); +} + +/* Assembly instruction format: vd, vj, rk. */ +/* Data types in instruction templates: V4SI, V4SI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplve_w (__m128i _1, int _2) +{ + return (__m128i)__builtin_lsx_vreplve_w ((v4i32)_1, (int)_2); +} + +/* Assembly instruction format: vd, vj, rk. */ +/* Data types in instruction templates: V2DI, V2DI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplve_d (__m128i _1, int _2) +{ + return (__m128i)__builtin_lsx_vreplve_d ((v2i64)_1, (int)_2); +} + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. 
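/* Sketch: vreplve broadcasts the lane selected by a general register
   (index taken modulo the lane count); the vreplvei forms below take the
   lane as an immediate instead.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i broadcast_lane_w (__m128i v, int lane)
{
  return __lsx_vreplve_w (v, lane);   /* all four lanes = v.w[lane % 4] */
}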
*/ +#define __lsx_vreplvei_b(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vreplvei_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vreplvei_h(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vreplvei_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui2. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vreplvei_w(/*__m128i*/ _1, /*ui2*/ _2) \ + ((__m128i)__builtin_lsx_vreplvei_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui1. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vreplvei_d(/*__m128i*/ _1, /*ui1*/ _2) \ + ((__m128i)__builtin_lsx_vreplvei_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickev_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickev_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickev_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickev_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickev_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickev_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickev_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickev_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickod_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickod_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickod_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickod_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickod_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickod_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpickod_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpickod_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. 
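/* Sketch: vpickev/vpickod de-interleave two vectors: even-indexed elements
   go to one result, odd-indexed to the other, with the second operand
   filling the low half.  E.g. splitting interleaved stereo samples.
   (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline void split_stereo_i16 (__m128i lo, __m128i hi,
                                     __m128i *left, __m128i *right)
{
  *left  = __lsx_vpickev_h (hi, lo);   /* samples 0, 2, 4, ... */
  *right = __lsx_vpickod_h (hi, lo);   /* samples 1, 3, 5, ... */
}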
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvh_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvh_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvh_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvh_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvh_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvh_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvh_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvh_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvl_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvl_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvl_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvl_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvl_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvl_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vilvl_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vilvl_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackev_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackev_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackev_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackev_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackev_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackev_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. 
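/* Sketch: vilvl/vilvh are the matching "zip": they interleave the low or
   high halves of the two sources element by element, the second operand
   landing in the even positions.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline void zip_bytes (__m128i a, __m128i b, __m128i *lo, __m128i *hi)
{
  *lo = __lsx_vilvl_b (a, b);   /* b0,a0,b1,a1,... from the low halves  */
  *hi = __lsx_vilvh_b (a, b);   /* b8,a8,b9,a9,... from the high halves */
}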
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackev_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackev_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackod_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackod_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackod_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackod_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackod_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackod_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpackod_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vpackod_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vshuf_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vshuf_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vshuf_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vshuf_w ((v4i32)_1, (v4i32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vshuf_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vshuf_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vand_v (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vand_v ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vandi_b(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vandi_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vor_v (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vor_v ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. 
*/ +#define __lsx_vori_b(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vori_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vnor_v (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vnor_v ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vnori_b(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vnori_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vxor_v (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vxor_v ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ +#define __lsx_vxori_b(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vxori_b ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vbitsel_v (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vbitsel_v ((v16u8)_1, (v16u8)_2, (v16u8)_3); +} + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, USI. */ +#define __lsx_vbitseli_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vbitseli_b ((v16u8)(_1), (v16u8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V16QI, V16QI, USI. */ +#define __lsx_vshuf4i_b(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vshuf4i_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V8HI, V8HI, USI. */ +#define __lsx_vshuf4i_h(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vshuf4i_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V4SI, V4SI, USI. */ +#define __lsx_vshuf4i_w(/*__m128i*/ _1, /*ui8*/ _2) \ + ((__m128i)__builtin_lsx_vshuf4i_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, rj. */ +/* Data types in instruction templates: V16QI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplgr2vr_b (int _1) +{ + return (__m128i)__builtin_lsx_vreplgr2vr_b ((int)_1); +} + +/* Assembly instruction format: vd, rj. */ +/* Data types in instruction templates: V8HI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplgr2vr_h (int _1) +{ + return (__m128i)__builtin_lsx_vreplgr2vr_h ((int)_1); +} + +/* Assembly instruction format: vd, rj. */ +/* Data types in instruction templates: V4SI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplgr2vr_w (int _1) +{ + return (__m128i)__builtin_lsx_vreplgr2vr_w ((int)_1); +} + +/* Assembly instruction format: vd, rj. */ +/* Data types in instruction templates: V2DI, DI. 
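/* Sketch: vbitsel_v is a pure bitwise blend: each result bit comes from
   the first operand where the mask bit is 0 and from the second where it
   is 1.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i blend_bits (__m128i a, __m128i b, __m128i mask)
{
  /* result = (a & ~mask) | (b & mask) in one instruction */
  return __lsx_vbitsel_v (a, b, mask);
}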
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vreplgr2vr_d (long int _1) +{ + return (__m128i)__builtin_lsx_vreplgr2vr_d ((long int)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpcnt_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vpcnt_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpcnt_h (__m128i _1) +{ + return (__m128i)__builtin_lsx_vpcnt_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpcnt_w (__m128i _1) +{ + return (__m128i)__builtin_lsx_vpcnt_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vpcnt_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vpcnt_d ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclo_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclo_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclo_h (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclo_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclo_w (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclo_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclo_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclo_d ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclz_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclz_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclz_h (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclz_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclz_w (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclz_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. 
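/* Sketch: splat-and-count — vreplgr2vr broadcasts a scalar into every
   lane, and vpcnt/vclo/vclz then give per-lane population, leading-one,
   and leading-zero counts.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i popcount_splat (int x)
{
  __m128i v = __lsx_vreplgr2vr_w (x);   /* every lane = x           */
  return __lsx_vpcnt_w (v);             /* every lane = popcount(x) */
}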
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vclz_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vclz_d ((v2i64)_1); +} + +/* Assembly instruction format: rd, vj, ui4. */ +/* Data types in instruction templates: SI, V16QI, UQI. */ +#define __lsx_vpickve2gr_b(/*__m128i*/ _1, /*ui4*/ _2) \ + ((int)__builtin_lsx_vpickve2gr_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui3. */ +/* Data types in instruction templates: SI, V8HI, UQI. */ +#define __lsx_vpickve2gr_h(/*__m128i*/ _1, /*ui3*/ _2) \ + ((int)__builtin_lsx_vpickve2gr_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui2. */ +/* Data types in instruction templates: SI, V4SI, UQI. */ +#define __lsx_vpickve2gr_w(/*__m128i*/ _1, /*ui2*/ _2) \ + ((int)__builtin_lsx_vpickve2gr_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui1. */ +/* Data types in instruction templates: DI, V2DI, UQI. */ +#define __lsx_vpickve2gr_d(/*__m128i*/ _1, /*ui1*/ _2) \ + ((long int)__builtin_lsx_vpickve2gr_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui4. */ +/* Data types in instruction templates: USI, V16QI, UQI. */ +#define __lsx_vpickve2gr_bu(/*__m128i*/ _1, /*ui4*/ _2) \ + ((unsigned int)__builtin_lsx_vpickve2gr_bu ((v16i8)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui3. */ +/* Data types in instruction templates: USI, V8HI, UQI. */ +#define __lsx_vpickve2gr_hu(/*__m128i*/ _1, /*ui3*/ _2) \ + ((unsigned int)__builtin_lsx_vpickve2gr_hu ((v8i16)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui2. */ +/* Data types in instruction templates: USI, V4SI, UQI. */ +#define __lsx_vpickve2gr_wu(/*__m128i*/ _1, /*ui2*/ _2) \ + ((unsigned int)__builtin_lsx_vpickve2gr_wu ((v4i32)(_1), (_2))) + +/* Assembly instruction format: rd, vj, ui1. */ +/* Data types in instruction templates: UDI, V2DI, UQI. */ +#define __lsx_vpickve2gr_du(/*__m128i*/ _1, /*ui1*/ _2) \ + ((unsigned long int)__builtin_lsx_vpickve2gr_du ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, rj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, SI, UQI. */ +#define __lsx_vinsgr2vr_b(/*__m128i*/ _1, /*int*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vinsgr2vr_b ((v16i8)(_1), (int)(_2), (_3))) + +/* Assembly instruction format: vd, rj, ui3. */ +/* Data types in instruction templates: V8HI, V8HI, SI, UQI. */ +#define __lsx_vinsgr2vr_h(/*__m128i*/ _1, /*int*/ _2, /*ui3*/ _3) \ + ((__m128i)__builtin_lsx_vinsgr2vr_h ((v8i16)(_1), (int)(_2), (_3))) + +/* Assembly instruction format: vd, rj, ui2. */ +/* Data types in instruction templates: V4SI, V4SI, SI, UQI. */ +#define __lsx_vinsgr2vr_w(/*__m128i*/ _1, /*int*/ _2, /*ui2*/ _3) \ + ((__m128i)__builtin_lsx_vinsgr2vr_w ((v4i32)(_1), (int)(_2), (_3))) + +/* Assembly instruction format: vd, rj, ui1. */ +/* Data types in instruction templates: V2DI, V2DI, DI, UQI. */ +#define __lsx_vinsgr2vr_d(/*__m128i*/ _1, /*long int*/ _2, /*ui1*/ _3) \ + ((__m128i)__builtin_lsx_vinsgr2vr_d ((v2i64)(_1), (long int)(_2), (_3))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfadd_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfadd_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. 
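/* Sketch: scalar <-> lane traffic.  The lane index is encoded as an
   immediate, which is why these forms are macros and the index must be a
   compile-time constant.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i bump_lane0 (__m128i v)
{
  int x = __lsx_vpickve2gr_w (v, 0);        /* read lane 0           */
  return __lsx_vinsgr2vr_w (v, x + 1, 0);   /* write back lane 0 + 1 */
}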
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfadd_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfadd_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfsub_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfsub_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfsub_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfsub_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmul_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfmul_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmul_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfmul_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfdiv_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfdiv_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfdiv_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfdiv_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcvt_h_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcvt_h_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfcvt_s_d (__m128d _1, __m128d _2) +{ + return (__m128)__builtin_lsx_vfcvt_s_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmin_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfmin_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmin_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfmin_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmina_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfmina_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmina_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfmina_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmax_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfmax_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmax_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfmax_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmaxa_s (__m128 _1, __m128 _2) +{ + return (__m128)__builtin_lsx_vfmaxa_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmaxa_d (__m128d _1, __m128d _2) +{ + return (__m128d)__builtin_lsx_vfmaxa_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfclass_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vfclass_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfclass_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vfclass_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfsqrt_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfsqrt_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfsqrt_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfsqrt_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrecip_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrecip_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrecip_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrecip_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. 
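/* Sketch: vfmin/vfmax follow the IEEE-754 minNum/maxNum convention (a
   quiet NaN in one operand yields the other operand), while the
   vfmina/vfmaxa forms compare by magnitude.  A NaN-tolerant clamp is then
   just (assumes <lsxintrin.h>): */
#include <lsxintrin.h>

static inline __m128 clamp_f32 (__m128 x, __m128 lo, __m128 hi)
{
  return __lsx_vfmin_s (__lsx_vfmax_s (x, lo), hi);
}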
*/ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrint_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrint_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrint_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrint_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrsqrt_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrsqrt_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrsqrt_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrsqrt_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vflogb_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vflogb_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vflogb_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vflogb_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfcvth_s_h (__m128i _1) +{ + return (__m128)__builtin_lsx_vfcvth_s_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfcvth_d_s (__m128 _1) +{ + return (__m128d)__builtin_lsx_vfcvth_d_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfcvtl_s_h (__m128i _1) +{ + return (__m128)__builtin_lsx_vfcvtl_s_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfcvtl_d_s (__m128 _1) +{ + return (__m128d)__builtin_lsx_vfcvtl_d_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftint_w_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftint_w_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftint_l_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftint_l_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV4SI, V4SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftint_wu_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftint_wu_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftint_lu_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftint_lu_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrz_w_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrz_w_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrz_l_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftintrz_l_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrz_wu_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrz_wu_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrz_lu_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftintrz_lu_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vffint_s_w (__m128i _1) +{ + return (__m128)__builtin_lsx_vffint_s_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vffint_d_l (__m128i _1) +{ + return (__m128d)__builtin_lsx_vffint_d_l ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vffint_s_wu (__m128i _1) +{ + return (__m128)__builtin_lsx_vffint_s_wu ((v4u32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vffint_d_lu (__m128i _1) +{ + return (__m128d)__builtin_lsx_vffint_d_lu ((v2u64)_1); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vandn_v (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vandn_v ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vneg_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vneg_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V8HI, V8HI. 
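/* Sketch: vftintrz always rounds toward zero while vftint honours the
   current rounding mode; converting back with vffint gives a cheap
   truncate-to-integral.  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128 trunc_to_integral (__m128 x)
{
  __m128i i = __lsx_vftintrz_w_s (x);   /* f32 -> i32, toward zero */
  return __lsx_vffint_s_w (i);          /* i32 -> f32              */
}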
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vneg_h (__m128i _1) +{ + return (__m128i)__builtin_lsx_vneg_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vneg_w (__m128i _1) +{ + return (__m128i)__builtin_lsx_vneg_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vneg_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vneg_d ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmuh_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmuh_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V8HI, V16QI, UQI. */ +#define __lsx_vsllwil_h_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsllwil_h_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. 
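/* Sketch: vmul keeps the low half of each product and vmuh the high half,
   so a coarse Q15 fixed-point multiply is one vmuh plus a shift (this
   variant drops bit 15 of the full product).  (Assumes <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i q15_mul (__m128i a, __m128i b)
{
  __m128i hi = __lsx_vmuh_h (a, b);   /* (a * b) >> 16, signed */
  return __lsx_vslli_h (hi, 1);       /* ~ (a * b) >> 15       */
}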
*/ +/* Data types in instruction templates: V4SI, V8HI, UQI. */ +#define __lsx_vsllwil_w_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsllwil_w_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V2DI, V4SI, UQI. */ +#define __lsx_vsllwil_d_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsllwil_d_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: UV8HI, UV16QI, UQI. */ +#define __lsx_vsllwil_hu_bu(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vsllwil_hu_bu ((v16u8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV4SI, UV8HI, UQI. */ +#define __lsx_vsllwil_wu_hu(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vsllwil_wu_hu ((v8u16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV2DI, UV4SI, UQI. */ +#define __lsx_vsllwil_du_wu(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vsllwil_du_wu ((v4u32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsran_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsran_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsran_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsran_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsran_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsran_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssran_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssran_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssran_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssran_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssran_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssran_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssran_bu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssran_bu_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. 
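/* Sketch: vsllwil widens the low-half elements and shifts them left by an
   immediate in a single step; with a shift of 0 it degenerates to a plain
   sign extension (zero extension for the unsigned forms).  (Assumes
   <lsxintrin.h>.) */
#include <lsxintrin.h>

static inline __m128i widen_low_bytes (__m128i v)
{
  return __lsx_vsllwil_h_b (v, 0);   /* low 8 x int8 -> 8 x int16 */
}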
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssran_hu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssran_hu_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssran_wu_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssran_wu_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrarn_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrarn_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrarn_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrarn_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrarn_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrarn_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrarn_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrarn_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrarn_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrarn_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrarn_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrarn_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrarn_bu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrarn_bu_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrarn_hu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrarn_hu_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrarn_wu_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrarn_wu_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
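The `vssran`/`vssrarn` wrappers above narrow each wider lane after an arithmetic right shift whose per-lane amount comes from the second operand, saturating the result (`vssrarn` also rounds before shifting). With a zero shift vector, `__lsx_vssran_b_h` degenerates into a plain signed-saturating 16-to-8 pack. A sketch assuming the LSX convention that the narrowed bytes fill the low half of the result and the upper half is zeroed:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  int16_t a[8] = { 300, -300, 127, -128, 1, -1, 32767, -32768 };
  int16_t zero[8] = { 0 };
  __m128i va, vs;
  memcpy(&va, a, 16);
  memcpy(&vs, zero, 16);

  /* Shift amount 0 in every lane: pure saturating 16->8 pack.
     Expected low bytes: 127 -128 127 -128 1 -1 127 -128, rest 0. */
  __m128i p = __lsx_vssran_b_h(va, vs);

  int8_t out[16];
  memcpy(out, &p, 16);
  for (int i = 0; i < 16; i++)
    printf("%d ", out[i]);
  putchar('\n');
  return 0;
}
```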
*/ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrln_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrln_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrln_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrln_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrln_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrln_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrln_bu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrln_bu_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrln_hu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrln_hu_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrln_wu_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrln_wu_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlrn_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlrn_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlrn_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlrn_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsrlrn_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsrlrn_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrlrn_bu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrlrn_bu_h ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrlrn_hu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrlrn_hu_w ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
*/ +/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrlrn_wu_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrlrn_wu_d ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, UQI. */ +#define __lsx_vfrstpi_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vfrstpi_b ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, UQI. */ +#define __lsx_vfrstpi_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vfrstpi_h ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfrstp_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vfrstp_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfrstp_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vfrstp_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vshuf4i_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vshuf4i_d ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vbsrl_v(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vbsrl_v ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vbsll_v(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vbsll_v ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vextrins_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vextrins_b ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vextrins_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vextrins_h ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vextrins_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vextrins_w ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vextrins_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vextrins_d ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. 
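`__lsx_vbsll_v` and `__lsx_vbsrl_v` above shift the whole 128-bit register by a byte count with zero fill, which on the little-endian lane layout moves data between lanes. A sketch under the usual assumptions (loongarch64, `-mlsx`):

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
  uint8_t src[16], lo[16], ro[16];
  for (int i = 0; i < 16; i++) src[i] = (uint8_t)i;
  __m128i v;
  memcpy(&v, src, 16);

  /* Shift the 128-bit value by 4 bytes in each direction. */
  __m128i l = __lsx_vbsll_v(v, 4);
  __m128i r = __lsx_vbsrl_v(v, 4);
  memcpy(lo, &l, 16);
  memcpy(ro, &r, 16);

  for (int i = 0; i < 16; i++) {
    assert(lo[i] == (i < 4 ? 0 : src[i - 4]));   /* bytes move up   */
    assert(ro[i] == (i < 12 ? src[i + 4] : 0));  /* bytes move down */
  }
  return 0;
}
```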
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmskltz_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vmskltz_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmskltz_h (__m128i _1) +{ + return (__m128i)__builtin_lsx_vmskltz_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmskltz_w (__m128i _1) +{ + return (__m128i)__builtin_lsx_vmskltz_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmskltz_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vmskltz_d ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsigncov_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsigncov_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsigncov_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsigncov_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsigncov_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsigncov_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsigncov_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsigncov_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmadd_s (__m128 _1, __m128 _2, __m128 _3) +{ + return (__m128)__builtin_lsx_vfmadd_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmadd_d (__m128d _1, __m128d _2, __m128d _3) +{ + return (__m128d)__builtin_lsx_vfmadd_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfmsub_s (__m128 _1, __m128 _2, __m128 _3) +{ + return (__m128)__builtin_lsx_vfmsub_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. 
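`__lsx_vfmadd_s`/`__lsx_vfmadd_d` above compute a fused multiply-add with a single rounding per lane; the `vfmsub` variants subtract the addend instead. A minimal sketch (same target/flag assumptions as above):

```c
#include <lsxintrin.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  float a[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
  float b[4] = { 10.f, 10.f, 10.f, 10.f };
  float c[4] = { 0.5f, 0.5f, 0.5f, 0.5f };
  __m128 va, vb, vc;
  memcpy(&va, a, 16);
  memcpy(&vb, b, 16);
  memcpy(&vc, c, 16);

  /* r[i] = a[i] * b[i] + c[i], fused (one rounding). */
  __m128 r = __lsx_vfmadd_s(va, vb, vc);

  float out[4];
  memcpy(out, &r, 16);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 10.5 20.5 30.5 40.5 */
  return 0;
}
```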
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfmsub_d (__m128d _1, __m128d _2, __m128d _3) +{ + return (__m128d)__builtin_lsx_vfmsub_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfnmadd_s (__m128 _1, __m128 _2, __m128 _3) +{ + return (__m128)__builtin_lsx_vfnmadd_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfnmadd_d (__m128d _1, __m128d _2, __m128d _3) +{ + return (__m128d)__builtin_lsx_vfnmadd_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfnmsub_s (__m128 _1, __m128 _2, __m128 _3) +{ + return (__m128)__builtin_lsx_vfnmsub_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); +} + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfnmsub_d (__m128d _1, __m128d _2, __m128d _3) +{ + return (__m128d)__builtin_lsx_vfnmsub_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrne_w_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrne_w_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrne_l_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftintrne_l_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrp_w_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrp_w_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrp_l_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftintrp_l_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrm_w_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrm_w_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrm_l_d (__m128d _1) +{ + return (__m128i)__builtin_lsx_vftintrm_l_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DF, V2DF. 
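The `vftintr{ne,p,m}` conversions above pick an explicit rounding mode rather than the FCSR default: `rne` rounds ties to even, `rp` rounds toward +inf, `rm` toward -inf. A sketch showing the three modes diverging on half-integers (loongarch64, `-mlsx` assumed):

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  float f[4] = { 1.5f, 2.5f, -1.5f, -2.5f };
  __m128 v;
  memcpy(&v, f, 16);

  int32_t ne[4], up[4], dn[4];
  __m128i t;
  t = __lsx_vftintrne_w_s(v); memcpy(ne, &t, 16); /* ties-to-even: 2 2 -2 -2 */
  t = __lsx_vftintrp_w_s(v);  memcpy(up, &t, 16); /* toward +inf:  2 3 -1 -2 */
  t = __lsx_vftintrm_w_s(v);  memcpy(dn, &t, 16); /* toward -inf:  1 2 -2 -3 */

  for (int i = 0; i < 4; i++)
    printf("%g -> rne %d, rp %d, rm %d\n", f[i], ne[i], up[i], dn[i]);
  return 0;
}
```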
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftint_w_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vftint_w_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SF, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vffint_s_l (__m128i _1, __m128i _2) +{ + return (__m128)__builtin_lsx_vffint_s_l ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrz_w_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vftintrz_w_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrp_w_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vftintrp_w_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrm_w_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vftintrm_w_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrne_w_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vftintrne_w_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintl_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintl_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftinth_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftinth_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vffinth_d_w (__m128i _1) +{ + return (__m128d)__builtin_lsx_vffinth_d_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vffintl_d_w (__m128i _1) +{ + return (__m128d)__builtin_lsx_vffintl_d_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrzl_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrzl_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. 
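The `l`/`h` conversions above operate on one half of the register and widen: `vftintl_l_s`/`vftinth_l_s` turn the low/high pair of floats into two 64-bit integers, and `vffintl_d_w`/`vffinth_d_w` go the other way from 32-bit integers to doubles. A sketch, assuming the unsuffixed `vftint` forms use the default nearest-even rounding mode:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  float f[4] = { 1.25f, -2.75f, 8.5f, -9.5f };
  __m128 v;
  memcpy(&v, f, 16);

  int64_t lo[2], hi[2];
  __m128i t;
  t = __lsx_vftintl_l_s(v); memcpy(lo, &t, 16); /* f[0], f[1] -> int64 */
  t = __lsx_vftinth_l_s(v); memcpy(hi, &t, 16); /* f[2], f[3] -> int64 */
  printf("lo: %lld %lld  hi: %lld %lld\n",
         (long long)lo[0], (long long)lo[1],
         (long long)hi[0], (long long)hi[1]);

  /* The reverse direction: widen the low two int32 lanes to double. */
  int32_t w[4] = { 7, -7, 1000000000, -1 };
  __m128i iv;
  memcpy(&iv, w, 16);
  double d[2];
  __m128d dv = __lsx_vffintl_d_w(iv);
  memcpy(d, &dv, 16);
  printf("%f %f\n", d[0], d[1]); /* 7.0 -7.0 */
  return 0;
}
```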
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrzh_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrzh_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrpl_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrpl_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrph_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrph_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrml_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrml_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrmh_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrmh_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrnel_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrnel_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vftintrneh_l_s (__m128 _1) +{ + return (__m128i)__builtin_lsx_vftintrneh_l_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrintrne_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrintrne_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrintrne_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrintrne_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrintrz_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrintrz_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrintrz_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrintrz_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrintrp_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrintrp_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. 
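Unlike the `vftint*` conversions, the `vfrint*` wrappers above stay in floating point: each lane is rounded to an integral value in the chosen mode but keeps its float format. A short sketch (same assumptions as the earlier examples):

```c
#include <lsxintrin.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  float f[4] = { 1.7f, -1.7f, 2.5f, -0.5f };
  __m128 v, rz, ne;
  memcpy(&v, f, 16);

  rz = __lsx_vfrintrz_s(v);  /* toward zero:  1 -1  2 -0 */
  ne = __lsx_vfrintrne_s(v); /* ties-to-even: 2 -2  2 -0 */

  float a[4], b[4];
  memcpy(a, &rz, 16);
  memcpy(b, &ne, 16);
  for (int i = 0; i < 4; i++)
    printf("%g -> rz %g, rne %g\n", f[i], a[i], b[i]);
  return 0;
}
```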
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrintrp_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrintrp_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lsx_vfrintrm_s (__m128 _1) +{ + return (__m128)__builtin_lsx_vfrintrm_s ((v4f32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lsx_vfrintrm_d (__m128d _1) +{ + return (__m128d)__builtin_lsx_vfrintrm_d ((v2f64)_1); +} + +/* Assembly instruction format: vd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI, UQI. */ +#define __lsx_vstelm_b(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lsx_vstelm_b ((v16i8)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: vd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V8HI, CVPOINTER, SI, UQI. */ +#define __lsx_vstelm_h(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lsx_vstelm_h ((v8i16)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: vd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V4SI, CVPOINTER, SI, UQI. */ +#define __lsx_vstelm_w(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lsx_vstelm_w ((v4i32)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: vd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V2DI, CVPOINTER, SI, UQI. */ +#define __lsx_vstelm_d(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lsx_vstelm_d ((v2i64)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. 
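The `vaddwev`/`vaddwod` pairs starting above add the even-indexed or odd-indexed lanes after widening them, so the sums cannot wrap; the `_wu` forms zero-extend and the `_wu_w` forms mix an unsigned with a signed operand. A sketch for the signed word case:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
  int32_t a[4] = { INT32_MAX, 1, INT32_MAX, 3 };
  int32_t b[4] = { INT32_MAX, 5, -7,        9 };
  __m128i va, vb;
  memcpy(&va, a, 16);
  memcpy(&vb, b, 16);

  /* Even lanes (0, 2) and odd lanes (1, 3) widen to 64 bits. */
  int64_t ev[2], od[2];
  __m128i t;
  t = __lsx_vaddwev_d_w(va, vb); memcpy(ev, &t, 16);
  t = __lsx_vaddwod_d_w(va, vb); memcpy(od, &t, 16);

  assert(ev[0] == (int64_t)a[0] + b[0]);  /* 4294967294, no wrap */
  assert(ev[1] == (int64_t)a[2] + b[2]);
  assert(od[0] == (int64_t)a[1] + b[1]);
  assert(od[1] == (int64_t)a[3] + b[3]);
  return 0;
}
```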
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_d_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_d_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_w_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_w_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_h_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_h_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_d_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_d_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_w_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_w_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_h_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_h_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_d_wu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_d_wu_w ((v4u32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_w_hu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_w_hu_h ((v8u16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_h_bu_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_h_bu_b ((v16u8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
*/ +/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_d_wu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_d_wu_w ((v4u32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_w_hu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_w_hu_h ((v8u16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_h_bu_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_h_bu_b ((v16u8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_d_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_d_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_w_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_w_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_h_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_h_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_d_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_d_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_w_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_w_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_h_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_h_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_q_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_q_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_q_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_q_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
*/ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwev_q_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwev_q_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsubwod_q_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsubwod_q_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwev_q_du_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwev_q_du_d ((v2u64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vaddwod_q_du_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vaddwod_q_du_d ((v2u64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_d_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_d_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_w_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_w_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_h_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_h_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. 
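The `vmulwev`/`vmulwod` wrappers above produce full-width products of the even or odd lane pairs, e.g. 32x32->64 for the `_d_w` forms. A sketch under the usual assumptions:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
  int32_t a[4] = { 100000, 2, -100000, 4 };
  int32_t b[4] = { 100000, 3,  100000, 5 };
  __m128i va, vb;
  memcpy(&va, a, 16);
  memcpy(&vb, b, 16);

  /* Full 32x32->64 products of the even, then the odd, lane pairs. */
  int64_t ev[2], od[2];
  __m128i t;
  t = __lsx_vmulwev_d_w(va, vb); memcpy(ev, &t, 16);
  t = __lsx_vmulwod_d_w(va, vb); memcpy(od, &t, 16);

  assert(ev[0] == 10000000000LL && ev[1] == -10000000000LL);
  assert(od[0] == 6 && od[1] == 20);
  return 0;
}
```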
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_d_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_d_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_w_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_w_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_h_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_h_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_d_wu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_d_wu ((v4u32)_1, (v4u32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_w_hu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_w_hu ((v8u16)_1, (v8u16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_h_bu (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_h_bu ((v16u8)_1, (v16u8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_d_wu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_d_wu_w ((v4u32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_w_hu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_w_hu_h ((v8u16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_h_bu_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_h_bu_b ((v16u8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_d_wu_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_d_wu_w ((v4u32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_w_hu_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_w_hu_h ((v8u16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. 
*/ +/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_h_bu_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_h_bu_b ((v16u8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_q_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_q_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_q_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_q_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwev_q_du_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwev_q_du_d ((v2u64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmulwod_q_du_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vmulwod_q_du_d ((v2u64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhaddw_qu_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhaddw_qu_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_q_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_q_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
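`__lsx_vhaddw_q_d` above is the 128-bit horizontal add: per the LSX reference's odd-from-`vj` / even-from-`vk` pairing, it adds `vj`'s high doubleword to `vk`'s low doubleword at full 128-bit width. Passing the same vector twice therefore sums its two lanes without wrapping. A sketch assuming that pairing:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  int64_t a[2] = { INT64_MAX, 5 };
  __m128i v, s;
  memcpy(&v, a, 16);

  /* 128-bit sum of a[1] + a[0]; the overflow out of 64 bits lands in
     the high doubleword instead of being lost. */
  s = __lsx_vhaddw_q_d(v, v);

  int64_t q[2];
  memcpy(q, &s, 16);
  printf("low=%lld high=%lld\n", (long long)q[0], (long long)q[1]);
  return 0;
}
```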
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vhsubw_qu_du (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vhsubw_qu_du ((v2u64)_1, (v2u64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_d_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_d_w ((v2i64)_1, (v4i32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_w_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_w_h ((v4i32)_1, (v8i16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_h_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_h_b ((v8i16)_1, (v16i8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_d_wu (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_d_wu ((v2u64)_1, (v4u32)_2, (v4u32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_w_hu (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_w_hu ((v4u32)_1, (v8u16)_2, (v8u16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_h_bu (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_h_bu ((v8u16)_1, (v16u8)_2, (v16u8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_d_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_d_w ((v2i64)_1, (v4i32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_w_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_w_h ((v4i32)_1, (v8i16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_h_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_h_b ((v8i16)_1, (v16i8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_d_wu (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_d_wu ((v2u64)_1, (v4u32)_2, (v4u32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_w_hu (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_w_hu ((v4u32)_1, (v8u16)_2, (v8u16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_h_bu (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_h_bu ((v8u16)_1, (v16u8)_2, (v16u8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_d_wu_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_d_wu_w ((v2i64)_1, (v4u32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_w_hu_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_w_hu_h ((v4i32)_1, (v8u16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_h_bu_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_h_bu_b ((v8i16)_1, (v16u8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_d_wu_w (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_d_wu_w ((v2i64)_1, (v4u32)_2, (v4i32)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_w_hu_h (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_w_hu_h ((v4i32)_1, (v8u16)_2, (v8i16)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_h_bu_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_h_bu_b ((v8i16)_1, (v16u8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_q_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_q_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. 
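The `vmaddwev`/`vmaddwod` wrappers above accumulate widened products into the first operand, which makes an even/odd pair of calls a natural dot-product building block. A sketch for the signed word case:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
  int32_t a[4] = { 1, 2, 3, 4 };
  int32_t b[4] = { 5, 6, 7, 8 };
  int64_t zero[2] = { 0, 0 };
  __m128i va, vb, acc;
  memcpy(&va, a, 16);
  memcpy(&vb, b, 16);
  memcpy(&acc, zero, 16);

  /* Accumulate even, then odd, widened products:
     acc = { a0*b0 + a1*b1, a2*b2 + a3*b3 }. */
  acc = __lsx_vmaddwev_d_w(acc, va, vb);
  acc = __lsx_vmaddwod_d_w(acc, va, vb);

  int64_t out[2];
  memcpy(out, &acc, 16);
  assert(out[0] == 1*5 + 2*6);  /* 17 */
  assert(out[1] == 3*7 + 4*8);  /* 53 */
  return 0;
}
```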
*/ +/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_q_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_q_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_q_du (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_q_du ((v2u64)_1, (v2u64)_2, (v2u64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_q_du (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_q_du ((v2u64)_1, (v2u64)_2, (v2u64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwev_q_du_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwev_q_du_d ((v2i64)_1, (v2u64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmaddwod_q_du_d (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vmaddwod_q_du_d ((v2i64)_1, (v2u64)_2, (v2i64)_3); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vrotr_b (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vrotr_b ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vrotr_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vrotr_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vrotr_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vrotr_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vrotr_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vrotr_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vadd_q (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vadd_q ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI. 
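`__lsx_vadd_q` above (and `__lsx_vsub_q` just below) treat the whole register as a single 128-bit integer, so carries propagate across the doubleword boundary. A sketch:

```c
#include <lsxintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  /* Two 128-bit integers, least-significant doubleword first. */
  uint64_t x[2] = { UINT64_MAX, 0 };
  uint64_t y[2] = { 1,          0 };
  __m128i vx, vy, vs;
  memcpy(&vx, x, 16);
  memcpy(&vy, y, 16);

  /* The carry out of the low doubleword lands in the high one. */
  vs = __lsx_vadd_q(vx, vy);

  uint64_t s[2];
  memcpy(s, &vs, 16);
  printf("%llu %llu\n", (unsigned long long)s[0],
                        (unsigned long long)s[1]); /* 0 1 */
  return 0;
}
```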
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vsub_q (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vsub_q ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, rj, si12. */ +/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ +#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) \ + ((__m128i)__builtin_lsx_vldrepl_b ((void *)(_1), (_2))) + +/* Assembly instruction format: vd, rj, si11. */ +/* Data types in instruction templates: V8HI, CVPOINTER, SI. */ +#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) \ + ((__m128i)__builtin_lsx_vldrepl_h ((void *)(_1), (_2))) + +/* Assembly instruction format: vd, rj, si10. */ +/* Data types in instruction templates: V4SI, CVPOINTER, SI. */ +#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) \ + ((__m128i)__builtin_lsx_vldrepl_w ((void *)(_1), (_2))) + +/* Assembly instruction format: vd, rj, si9. */ +/* Data types in instruction templates: V2DI, CVPOINTER, SI. */ +#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) \ + ((__m128i)__builtin_lsx_vldrepl_d ((void *)(_1), (_2))) + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmskgez_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vmskgez_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vmsknz_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vmsknz_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V8HI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_h_b (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_h_b ((v16i8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V4SI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_w_h (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_w_h ((v8i16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_d_w (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_d_w ((v4i32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_q_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_q_d ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV8HI, UV16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_hu_bu (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_hu_bu ((v16u8)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV4SI, UV8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_wu_hu (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_wu_hu ((v8u16)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV2DI, UV4SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_du_wu (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_du_wu ((v4u32)_1); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vexth_qu_du (__m128i _1) +{ + return (__m128i)__builtin_lsx_vexth_qu_du ((v2u64)_1); +} + +/* Assembly instruction format: vd, vj, ui3. */ +/* Data types in instruction templates: V16QI, V16QI, UQI. */ +#define __lsx_vrotri_b(/*__m128i*/ _1, /*ui3*/ _2) \ + ((__m128i)__builtin_lsx_vrotri_b ((v16i8)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V8HI, V8HI, UQI. */ +#define __lsx_vrotri_h(/*__m128i*/ _1, /*ui4*/ _2) \ + ((__m128i)__builtin_lsx_vrotri_h ((v8i16)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V4SI, V4SI, UQI. */ +#define __lsx_vrotri_w(/*__m128i*/ _1, /*ui5*/ _2) \ + ((__m128i)__builtin_lsx_vrotri_w ((v4i32)(_1), (_2))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V2DI, V2DI, UQI. */ +#define __lsx_vrotri_d(/*__m128i*/ _1, /*ui6*/ _2) \ + ((__m128i)__builtin_lsx_vrotri_d ((v2i64)(_1), (_2))) + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vextl_q_d (__m128i _1) +{ + return (__m128i)__builtin_lsx_vextl_q_d ((v2i64)_1); +} + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vsrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vsrlni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vsrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vsrlni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vsrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vsrlni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vsrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vsrlni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vsrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vsrlrni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vsrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vsrlrni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vsrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vsrlrni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. 
*/ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vsrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vsrlrni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vssrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vssrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vssrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vssrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ +#define __lsx_vssrlni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ +#define __lsx_vssrlni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ +#define __lsx_vssrlni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ +#define __lsx_vssrlni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrlni_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vssrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vssrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vssrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vssrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. 
*/ +#define __lsx_vssrlrni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ +#define __lsx_vssrlrni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ +#define __lsx_vssrlrni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ +#define __lsx_vssrlrni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrlrni_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vsrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vsrani_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vsrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vsrani_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vsrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vsrani_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vsrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vsrani_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vsrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vsrarni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vsrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vsrarni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vsrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vsrarni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vsrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vsrarni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vssrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. 
*/ +#define __lsx_vssrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vssrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vssrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ +#define __lsx_vssrani_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ +#define __lsx_vssrani_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ +#define __lsx_vssrani_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ +#define __lsx_vssrani_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrani_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ +#define __lsx_vssrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ +#define __lsx_vssrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vssrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ +#define __lsx_vssrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui4. */ +/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ +#define __lsx_vssrarni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui5. */ +/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ +#define __lsx_vssrarni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui6. */ +/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. 
*/ +#define __lsx_vssrarni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui7. */ +/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ +#define __lsx_vssrarni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ + ((__m128i)__builtin_lsx_vssrarni_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) + +/* Assembly instruction format: vd, vj, ui8. */ +/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ +#define __lsx_vpermi_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ + ((__m128i)__builtin_lsx_vpermi_w ((v4i32)(_1), (v4i32)(_2), (_3))) + +/* Assembly instruction format: vd, rj, si12. */ +/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ +#define __lsx_vld(/*void **/ _1, /*si12*/ _2) \ + ((__m128i)__builtin_lsx_vld ((void *)(_1), (_2))) + +/* Assembly instruction format: vd, rj, si12. */ +/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI. */ +#define __lsx_vst(/*__m128i*/ _1, /*void **/ _2, /*si12*/ _3) \ + ((void)__builtin_lsx_vst ((v16i8)(_1), (void *)(_2), (_3))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrlrn_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrlrn_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrlrn_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrlrn_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrlrn_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrlrn_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V8HI, V8HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrln_b_h (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrln_b_h ((v8i16)_1, (v8i16)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V8HI, V4SI, V4SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrln_h_w (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrln_h_w ((v4i32)_1, (v4i32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V2DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vssrln_w_d (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vssrln_w_d ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vorn_v (__m128i _1, __m128i _2) +{ + return (__m128i)__builtin_lsx_vorn_v ((v16i8)_1, (v16i8)_2); +} + +/* Assembly instruction format: vd, i13. */ +/* Data types in instruction templates: V2DI, HI. 
*/ +#define __lsx_vldi(/*i13*/ _1) \ + ((__m128i)__builtin_lsx_vldi ((_1))) + +/* Assembly instruction format: vd, vj, vk, va. */ +/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vshuf_b (__m128i _1, __m128i _2, __m128i _3) +{ + return (__m128i)__builtin_lsx_vshuf_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); +} + +/* Assembly instruction format: vd, rj, rk. */ +/* Data types in instruction templates: V16QI, CVPOINTER, DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vldx (void * _1, long int _2) +{ + return (__m128i)__builtin_lsx_vldx ((void *)_1, (long int)_2); +} + +/* Assembly instruction format: vd, rj, rk. */ +/* Data types in instruction templates: VOID, V16QI, CVPOINTER, DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +void __lsx_vstx (__m128i _1, void * _2, long int _3) +{ + return (void)__builtin_lsx_vstx ((v16i8)_1, (void *)_2, (long int)_3); +} + +/* Assembly instruction format: vd, vj. */ +/* Data types in instruction templates: UV2DI, UV2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vextl_qu_du (__m128i _1) +{ + return (__m128i)__builtin_lsx_vextl_qu_du ((v2u64)_1); +} + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV16QI. */ +#define __lsx_bnz_b(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bnz_b ((v16u8)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV2DI. */ +#define __lsx_bnz_d(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bnz_d ((v2u64)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV8HI. */ +#define __lsx_bnz_h(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bnz_h ((v8u16)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV16QI. */ +#define __lsx_bnz_v(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bnz_v ((v16u8)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV4SI. */ +#define __lsx_bnz_w(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bnz_w ((v4u32)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV16QI. */ +#define __lsx_bz_b(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bz_b ((v16u8)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV2DI. */ +#define __lsx_bz_d(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bz_d ((v2u64)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV8HI. */ +#define __lsx_bz_h(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bz_h ((v8u16)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV16QI. */ +#define __lsx_bz_v(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bz_v ((v16u8)(_1))) + +/* Assembly instruction format: cd, vj. */ +/* Data types in instruction templates: SI, UV4SI. */ +#define __lsx_bz_w(/*__m128i*/ _1) \ + ((int)__builtin_lsx_bz_w ((v4u32)(_1))) + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_caf_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_caf_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_caf_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_caf_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_ceq_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_ceq_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_ceq_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_ceq_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cle_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cle_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cle_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cle_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_clt_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_clt_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_clt_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_clt_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cne_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cne_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cne_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cne_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cor_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cor_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cor_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cor_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cueq_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cueq_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cueq_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cueq_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cule_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cule_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cule_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cule_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cult_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cult_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cult_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cult_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cun_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cun_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cune_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cune_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cune_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cune_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_cun_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_cun_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_saf_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_saf_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_saf_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_saf_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_seq_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_seq_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_seq_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_seq_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sle_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sle_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sle_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sle_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_slt_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_slt_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_slt_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_slt_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sne_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sne_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sne_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sne_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sor_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sor_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sor_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sor_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sueq_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sueq_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sueq_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sueq_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sule_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sule_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sule_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sule_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sult_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sult_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sult_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sult_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sun_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sun_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V2DI, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sune_d (__m128d _1, __m128d _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sune_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sune_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sune_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, vj, vk. */ +/* Data types in instruction templates: V4SI, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lsx_vfcmp_sun_s (__m128 _1, __m128 _2) +{ + return (__m128i)__builtin_lsx_vfcmp_sun_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: vd, si10. */ +/* Data types in instruction templates: V16QI, HI. 
*/ +#define __lsx_vrepli_b(/*si10*/ _1) \ + ((__m128i)__builtin_lsx_vrepli_b ((_1))) + +/* Assembly instruction format: vd, si10. */ +/* Data types in instruction templates: V2DI, HI. */ +#define __lsx_vrepli_d(/*si10*/ _1) \ + ((__m128i)__builtin_lsx_vrepli_d ((_1))) + +/* Assembly instruction format: vd, si10. */ +/* Data types in instruction templates: V8HI, HI. */ +#define __lsx_vrepli_h(/*si10*/ _1) \ + ((__m128i)__builtin_lsx_vrepli_h ((_1))) + +/* Assembly instruction format: vd, si10. */ +/* Data types in instruction templates: V4SI, HI. */ +#define __lsx_vrepli_w(/*si10*/ _1) \ + ((__m128i)__builtin_lsx_vrepli_w ((_1))) + +#endif /* defined(__loongarch_sx) */ +#endif /* _GCC_LOONGSON_SXINTRIN_H */
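For readers cross-checking the generated Rust bindings against this GCC header, a minimal usage sketch may help; it is not part of the header or of this patch. It composes wrappers defined above: __lsx_vldrepl_w to broadcast a scalar, __lsx_vfcmp_clt_s to produce a per-lane all-ones/all-zeros mask, __lsx_bnz_v to test whether any mask byte is set, and __lsx_vmaddwod_h_bu for odd-lane widening multiply-accumulate. The function names and the build invocation are illustrative assumptions, not anything this patch defines.

/* Sketch only: assumes a LoongArch64 target with LSX enabled
   (e.g. gcc -mlsx).  any_below() and madd_odd_u8() are hypothetical names. */
#include <lsxintrin.h>

/* Return nonzero if any of the four floats at p is below `threshold`. */
int any_below(const float *p, float threshold)
{
    __m128  v    = (__m128)__lsx_vld(p, 0);                /* load 4 floats            */
    __m128  t    = (__m128)__lsx_vldrepl_w(&threshold, 0); /* splat threshold          */
    __m128i mask = __lsx_vfcmp_clt_s(v, t);                /* lane = (v < t) ? ~0 : 0  */
    return __lsx_bnz_v(mask);                              /* any nonzero byte?        */
}

/* Odd-lane widening multiply-accumulate from the vmaddwod group above:
   acc.h[i] += zext(a.b[2*i+1]) * zext(b.b[2*i+1]) for each of the 8
   halfword lanes, with the byte operands treated as unsigned. */
__m128i madd_odd_u8(__m128i acc, __m128i a, __m128i b)
{
    return __lsx_vmaddwod_h_bu(acc, a, b);
}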