diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs new file mode 100644 index 000000000000..86c2b4459438 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -0,0 +1,7027 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-loongarch/lasx.spec` and run the following command to re-generate this file: +// +// ``` +// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasx.spec +// ``` + +use super::types::*; + +#[allow(improper_ctypes)] +extern "unadjusted" { + #[link_name = "llvm.loongarch.lasx.xvsll.b"] + fn __lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsll.h"] + fn __lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsll.w"] + fn __lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsll.d"] + fn __lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslli.b"] + fn __lasx_xvslli_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslli.h"] + fn __lasx_xvslli_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslli.w"] + fn __lasx_xvslli_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslli.d"] + fn __lasx_xvslli_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsra.b"] + fn __lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsra.h"] + fn __lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsra.w"] + fn __lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsra.d"] + fn __lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrai.b"] + fn __lasx_xvsrai_b(a: v32i8, b: u32) -> v32i8; + #[link_name = 
"llvm.loongarch.lasx.xvsrai.h"] + fn __lasx_xvsrai_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrai.w"] + fn __lasx_xvsrai_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrai.d"] + fn __lasx_xvsrai_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrar.b"] + fn __lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrar.h"] + fn __lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrar.w"] + fn __lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrar.d"] + fn __lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrari.b"] + fn __lasx_xvsrari_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrari.h"] + fn __lasx_xvsrari_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrari.w"] + fn __lasx_xvsrari_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrari.d"] + fn __lasx_xvsrari_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrl.b"] + fn __lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrl.h"] + fn __lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrl.w"] + fn __lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrl.d"] + fn __lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrli.b"] + fn __lasx_xvsrli_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrli.h"] + fn __lasx_xvsrli_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrli.w"] + fn __lasx_xvsrli_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrli.d"] + fn __lasx_xvsrli_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrlr.b"] + fn __lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = 
"llvm.loongarch.lasx.xvsrlr.h"] + fn __lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrlr.w"] + fn __lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrlr.d"] + fn __lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrlri.b"] + fn __lasx_xvsrlri_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrlri.h"] + fn __lasx_xvsrlri_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrlri.w"] + fn __lasx_xvsrlri_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrlri.d"] + fn __lasx_xvsrlri_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvbitclr.b"] + fn __lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitclr.h"] + fn __lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvbitclr.w"] + fn __lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvbitclr.d"] + fn __lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvbitclri.b"] + fn __lasx_xvbitclri_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitclri.h"] + fn __lasx_xvbitclri_h(a: v16u16, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvbitclri.w"] + fn __lasx_xvbitclri_w(a: v8u32, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvbitclri.d"] + fn __lasx_xvbitclri_d(a: v4u64, b: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvbitset.b"] + fn __lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitset.h"] + fn __lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvbitset.w"] + fn __lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvbitset.d"] + fn __lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvbitseti.b"] + fn 
__lasx_xvbitseti_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitseti.h"] + fn __lasx_xvbitseti_h(a: v16u16, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvbitseti.w"] + fn __lasx_xvbitseti_w(a: v8u32, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvbitseti.d"] + fn __lasx_xvbitseti_d(a: v4u64, b: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvbitrev.b"] + fn __lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitrev.h"] + fn __lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvbitrev.w"] + fn __lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvbitrev.d"] + fn __lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvbitrevi.b"] + fn __lasx_xvbitrevi_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitrevi.h"] + fn __lasx_xvbitrevi_h(a: v16u16, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvbitrevi.w"] + fn __lasx_xvbitrevi_w(a: v8u32, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvbitrevi.d"] + fn __lasx_xvbitrevi_d(a: v4u64, b: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvadd.b"] + fn __lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvadd.h"] + fn __lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvadd.w"] + fn __lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvadd.d"] + fn __lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddi.bu"] + fn __lasx_xvaddi_bu(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvaddi.hu"] + fn __lasx_xvaddi_hu(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvaddi.wu"] + fn __lasx_xvaddi_wu(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddi.du"] + fn __lasx_xvaddi_du(a: v4i64, b: u32) -> v4i64; + #[link_name = 
"llvm.loongarch.lasx.xvsub.b"] + fn __lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsub.h"] + fn __lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsub.w"] + fn __lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsub.d"] + fn __lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubi.bu"] + fn __lasx_xvsubi_bu(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsubi.hu"] + fn __lasx_xvsubi_hu(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsubi.wu"] + fn __lasx_xvsubi_wu(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsubi.du"] + fn __lasx_xvsubi_du(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmax.b"] + fn __lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmax.h"] + fn __lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmax.w"] + fn __lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmax.d"] + fn __lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaxi.b"] + fn __lasx_xvmaxi_b(a: v32i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmaxi.h"] + fn __lasx_xvmaxi_h(a: v16i16, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmaxi.w"] + fn __lasx_xvmaxi_w(a: v8i32, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmaxi.d"] + fn __lasx_xvmaxi_d(a: v4i64, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmax.bu"] + fn __lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvmax.hu"] + fn __lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmax.wu"] + fn __lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmax.du"] + fn __lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = 
"llvm.loongarch.lasx.xvmaxi.bu"] + fn __lasx_xvmaxi_bu(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvmaxi.hu"] + fn __lasx_xvmaxi_hu(a: v16u16, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmaxi.wu"] + fn __lasx_xvmaxi_wu(a: v8u32, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmaxi.du"] + fn __lasx_xvmaxi_du(a: v4u64, b: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmin.b"] + fn __lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmin.h"] + fn __lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmin.w"] + fn __lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmin.d"] + fn __lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmini.b"] + fn __lasx_xvmini_b(a: v32i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmini.h"] + fn __lasx_xvmini_h(a: v16i16, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmini.w"] + fn __lasx_xvmini_w(a: v8i32, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmini.d"] + fn __lasx_xvmini_d(a: v4i64, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmin.bu"] + fn __lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvmin.hu"] + fn __lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmin.wu"] + fn __lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmin.du"] + fn __lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmini.bu"] + fn __lasx_xvmini_bu(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvmini.hu"] + fn __lasx_xvmini_hu(a: v16u16, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmini.wu"] + fn __lasx_xvmini_wu(a: v8u32, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmini.du"] + fn __lasx_xvmini_du(a: v4u64, b: u32) -> v4u64; + #[link_name = 
"llvm.loongarch.lasx.xvseq.b"] + fn __lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvseq.h"] + fn __lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvseq.w"] + fn __lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvseq.d"] + fn __lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvseqi.b"] + fn __lasx_xvseqi_b(a: v32i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvseqi.h"] + fn __lasx_xvseqi_h(a: v16i16, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvseqi.w"] + fn __lasx_xvseqi_w(a: v8i32, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvseqi.d"] + fn __lasx_xvseqi_d(a: v4i64, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslt.b"] + fn __lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslt.h"] + fn __lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslt.w"] + fn __lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslt.d"] + fn __lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslti.b"] + fn __lasx_xvslti_b(a: v32i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslti.h"] + fn __lasx_xvslti_h(a: v16i16, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslti.w"] + fn __lasx_xvslti_w(a: v8i32, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslti.d"] + fn __lasx_xvslti_d(a: v4i64, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslt.bu"] + fn __lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslt.hu"] + fn __lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslt.wu"] + fn __lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslt.du"] + fn __lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslti.bu"] + fn 
__lasx_xvslti_bu(a: v32u8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslti.hu"] + fn __lasx_xvslti_hu(a: v16u16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslti.wu"] + fn __lasx_xvslti_wu(a: v8u32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslti.du"] + fn __lasx_xvslti_du(a: v4u64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsle.b"] + fn __lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsle.h"] + fn __lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsle.w"] + fn __lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsle.d"] + fn __lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslei.b"] + fn __lasx_xvslei_b(a: v32i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslei.h"] + fn __lasx_xvslei_h(a: v16i16, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslei.w"] + fn __lasx_xvslei_w(a: v8i32, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslei.d"] + fn __lasx_xvslei_d(a: v4i64, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsle.bu"] + fn __lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsle.hu"] + fn __lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsle.wu"] + fn __lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsle.du"] + fn __lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvslei.bu"] + fn __lasx_xvslei_bu(a: v32u8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvslei.hu"] + fn __lasx_xvslei_hu(a: v16u16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvslei.wu"] + fn __lasx_xvslei_wu(a: v8u32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvslei.du"] + fn __lasx_xvslei_du(a: v4u64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsat.b"] + fn __lasx_xvsat_b(a: v32i8, 
b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsat.h"] + fn __lasx_xvsat_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsat.w"] + fn __lasx_xvsat_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsat.d"] + fn __lasx_xvsat_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsat.bu"] + fn __lasx_xvsat_bu(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvsat.hu"] + fn __lasx_xvsat_hu(a: v16u16, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvsat.wu"] + fn __lasx_xvsat_wu(a: v8u32, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvsat.du"] + fn __lasx_xvsat_du(a: v4u64, b: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvadda.b"] + fn __lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvadda.h"] + fn __lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvadda.w"] + fn __lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvadda.d"] + fn __lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsadd.b"] + fn __lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsadd.h"] + fn __lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsadd.w"] + fn __lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsadd.d"] + fn __lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsadd.bu"] + fn __lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvsadd.hu"] + fn __lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvsadd.wu"] + fn __lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvsadd.du"] + fn __lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvavg.b"] + fn __lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8; + 
#[link_name = "llvm.loongarch.lasx.xvavg.h"] + fn __lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvavg.w"] + fn __lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvavg.d"] + fn __lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvavg.bu"] + fn __lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvavg.hu"] + fn __lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvavg.wu"] + fn __lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvavg.du"] + fn __lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvavgr.b"] + fn __lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvavgr.h"] + fn __lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvavgr.w"] + fn __lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvavgr.d"] + fn __lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvavgr.bu"] + fn __lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvavgr.hu"] + fn __lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvavgr.wu"] + fn __lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvavgr.du"] + fn __lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvssub.b"] + fn __lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssub.h"] + fn __lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssub.w"] + fn __lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssub.d"] + fn __lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssub.bu"] + fn __lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8; + 
#[link_name = "llvm.loongarch.lasx.xvssub.hu"] + fn __lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssub.wu"] + fn __lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvssub.du"] + fn __lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvabsd.b"] + fn __lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvabsd.h"] + fn __lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvabsd.w"] + fn __lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvabsd.d"] + fn __lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvabsd.bu"] + fn __lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvabsd.hu"] + fn __lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvabsd.wu"] + fn __lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvabsd.du"] + fn __lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmul.b"] + fn __lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmul.h"] + fn __lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmul.w"] + fn __lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmul.d"] + fn __lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmadd.b"] + fn __lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmadd.h"] + fn __lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmadd.w"] + fn __lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmadd.d"] + fn __lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmsub.b"] + fn 
__lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmsub.h"] + fn __lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmsub.w"] + fn __lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmsub.d"] + fn __lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvdiv.b"] + fn __lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvdiv.h"] + fn __lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvdiv.w"] + fn __lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvdiv.d"] + fn __lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvdiv.bu"] + fn __lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvdiv.hu"] + fn __lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvdiv.wu"] + fn __lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvdiv.du"] + fn __lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvhaddw.h.b"] + fn __lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvhaddw.w.h"] + fn __lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvhaddw.d.w"] + fn __lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvhaddw.hu.bu"] + fn __lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvhaddw.wu.hu"] + fn __lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvhaddw.du.wu"] + fn __lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvhsubw.h.b"] + fn __lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvhsubw.w.h"] + fn 
__lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvhsubw.d.w"] + fn __lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvhsubw.hu.bu"] + fn __lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvhsubw.wu.hu"] + fn __lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvhsubw.du.wu"] + fn __lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmod.b"] + fn __lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmod.h"] + fn __lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmod.w"] + fn __lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmod.d"] + fn __lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmod.bu"] + fn __lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvmod.hu"] + fn __lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmod.wu"] + fn __lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmod.du"] + fn __lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvrepl128vei.b"] + fn __lasx_xvrepl128vei_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvrepl128vei.h"] + fn __lasx_xvrepl128vei_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvrepl128vei.w"] + fn __lasx_xvrepl128vei_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvrepl128vei.d"] + fn __lasx_xvrepl128vei_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpickev.b"] + fn __lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvpickev.h"] + fn __lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvpickev.w"] + fn __lasx_xvpickev_w(a: 
v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpickev.d"] + fn __lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpickod.b"] + fn __lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvpickod.h"] + fn __lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvpickod.w"] + fn __lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpickod.d"] + fn __lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvilvh.b"] + fn __lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvilvh.h"] + fn __lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvilvh.w"] + fn __lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvilvh.d"] + fn __lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvilvl.b"] + fn __lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvilvl.h"] + fn __lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvilvl.w"] + fn __lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvilvl.d"] + fn __lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpackev.b"] + fn __lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvpackev.h"] + fn __lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvpackev.w"] + fn __lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpackev.d"] + fn __lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpackod.b"] + fn __lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvpackod.h"] + fn __lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = 
"llvm.loongarch.lasx.xvpackod.w"] + fn __lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpackod.d"] + fn __lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvshuf.b"] + fn __lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvshuf.h"] + fn __lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvshuf.w"] + fn __lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvshuf.d"] + fn __lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvand.v"] + fn __lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvandi.b"] + fn __lasx_xvandi_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvor.v"] + fn __lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvori.b"] + fn __lasx_xvori_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvnor.v"] + fn __lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvnori.b"] + fn __lasx_xvnori_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvxor.v"] + fn __lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvxori.b"] + fn __lasx_xvxori_b(a: v32u8, b: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitsel.v"] + fn __lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvbitseli.b"] + fn __lasx_xvbitseli_b(a: v32u8, b: v32u8, c: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvshuf4i.b"] + fn __lasx_xvshuf4i_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvshuf4i.h"] + fn __lasx_xvshuf4i_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvshuf4i.w"] + fn __lasx_xvshuf4i_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.b"] + fn 
__lasx_xvreplgr2vr_b(a: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.h"] + fn __lasx_xvreplgr2vr_h(a: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.w"] + fn __lasx_xvreplgr2vr_w(a: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.d"] + fn __lasx_xvreplgr2vr_d(a: i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpcnt.b"] + fn __lasx_xvpcnt_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvpcnt.h"] + fn __lasx_xvpcnt_h(a: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvpcnt.w"] + fn __lasx_xvpcnt_w(a: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpcnt.d"] + fn __lasx_xvpcnt_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvclo.b"] + fn __lasx_xvclo_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvclo.h"] + fn __lasx_xvclo_h(a: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvclo.w"] + fn __lasx_xvclo_w(a: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvclo.d"] + fn __lasx_xvclo_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvclz.b"] + fn __lasx_xvclz_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvclz.h"] + fn __lasx_xvclz_h(a: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvclz.w"] + fn __lasx_xvclz_w(a: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvclz.d"] + fn __lasx_xvclz_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfadd.s"] + fn __lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfadd.d"] + fn __lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfsub.s"] + fn __lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfsub.d"] + fn __lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfmul.s"] + fn __lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmul.d"] + fn __lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = 
"llvm.loongarch.lasx.xvfdiv.s"] + fn __lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfdiv.d"] + fn __lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfcvt.h.s"] + fn __lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvfcvt.s.d"] + fn __lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmin.s"] + fn __lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmin.d"] + fn __lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfmina.s"] + fn __lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmina.d"] + fn __lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfmax.s"] + fn __lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmax.d"] + fn __lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfmaxa.s"] + fn __lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmaxa.d"] + fn __lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfclass.s"] + fn __lasx_xvfclass_s(a: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfclass.d"] + fn __lasx_xvfclass_d(a: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfsqrt.s"] + fn __lasx_xvfsqrt_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfsqrt.d"] + fn __lasx_xvfsqrt_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfrecip.s"] + fn __lasx_xvfrecip_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrecip.d"] + fn __lasx_xvfrecip_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfrint.s"] + fn __lasx_xvfrint_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrint.d"] + fn __lasx_xvfrint_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfrsqrt.s"] + fn 
__lasx_xvfrsqrt_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrsqrt.d"] + fn __lasx_xvfrsqrt_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvflogb.s"] + fn __lasx_xvflogb_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvflogb.d"] + fn __lasx_xvflogb_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfcvth.s.h"] + fn __lasx_xvfcvth_s_h(a: v16i16) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfcvth.d.s"] + fn __lasx_xvfcvth_d_s(a: v8f32) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfcvtl.s.h"] + fn __lasx_xvfcvtl_s_h(a: v16i16) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfcvtl.d.s"] + fn __lasx_xvfcvtl_d_s(a: v8f32) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvftint.w.s"] + fn __lasx_xvftint_w_s(a: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftint.l.d"] + fn __lasx_xvftint_l_d(a: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftint.wu.s"] + fn __lasx_xvftint_wu_s(a: v8f32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvftint.lu.d"] + fn __lasx_xvftint_lu_d(a: v4f64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvftintrz.w.s"] + fn __lasx_xvftintrz_w_s(a: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrz.l.d"] + fn __lasx_xvftintrz_l_d(a: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrz.wu.s"] + fn __lasx_xvftintrz_wu_s(a: v8f32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvftintrz.lu.d"] + fn __lasx_xvftintrz_lu_d(a: v4f64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvffint.s.w"] + fn __lasx_xvffint_s_w(a: v8i32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvffint.d.l"] + fn __lasx_xvffint_d_l(a: v4i64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvffint.s.wu"] + fn __lasx_xvffint_s_wu(a: v8u32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvffint.d.lu"] + fn __lasx_xvffint_d_lu(a: v4u64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvreplve.b"] + fn __lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8; + #[link_name = 
"llvm.loongarch.lasx.xvreplve.h"] + fn __lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvreplve.w"] + fn __lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvreplve.d"] + fn __lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpermi.w"] + fn __lasx_xvpermi_w(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvandn.v"] + fn __lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvneg.b"] + fn __lasx_xvneg_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvneg.h"] + fn __lasx_xvneg_h(a: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvneg.w"] + fn __lasx_xvneg_w(a: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvneg.d"] + fn __lasx_xvneg_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmuh.b"] + fn __lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmuh.h"] + fn __lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmuh.w"] + fn __lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmuh.d"] + fn __lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmuh.bu"] + fn __lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvmuh.hu"] + fn __lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmuh.wu"] + fn __lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmuh.du"] + fn __lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvsllwil.h.b"] + fn __lasx_xvsllwil_h_b(a: v32i8, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsllwil.w.h"] + fn __lasx_xvsllwil_w_h(a: v16i16, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsllwil.d.w"] + fn __lasx_xvsllwil_d_w(a: v8i32, b: u32) -> v4i64; + #[link_name = 
"llvm.loongarch.lasx.xvsllwil.hu.bu"] + fn __lasx_xvsllwil_hu_bu(a: v32u8, b: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvsllwil.wu.hu"] + fn __lasx_xvsllwil_wu_hu(a: v16u16, b: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvsllwil.du.wu"] + fn __lasx_xvsllwil_du_wu(a: v8u32, b: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvsran.b.h"] + fn __lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsran.h.w"] + fn __lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsran.w.d"] + fn __lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssran.b.h"] + fn __lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssran.h.w"] + fn __lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssran.w.d"] + fn __lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssran.bu.h"] + fn __lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssran.hu.w"] + fn __lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssran.wu.d"] + fn __lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvsrarn.b.h"] + fn __lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrarn.h.w"] + fn __lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrarn.w.d"] + fn __lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrarn.b.h"] + fn __lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrarn.h.w"] + fn __lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrarn.w.d"] + fn __lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrarn.bu.h"] + fn __lasx_xvssrarn_bu_h(a: v16u16, 
b: v16u16) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrarn.hu.w"] + fn __lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssrarn.wu.d"] + fn __lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvsrln.b.h"] + fn __lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrln.h.w"] + fn __lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrln.w.d"] + fn __lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrln.bu.h"] + fn __lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrln.hu.w"] + fn __lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssrln.wu.d"] + fn __lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvsrlrn.b.h"] + fn __lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrlrn.h.w"] + fn __lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrlrn.w.d"] + fn __lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrlrn.bu.h"] + fn __lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrlrn.hu.w"] + fn __lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssrlrn.wu.d"] + fn __lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvfrstpi.b"] + fn __lasx_xvfrstpi_b(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvfrstpi.h"] + fn __lasx_xvfrstpi_h(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvfrstp.b"] + fn __lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvfrstp.h"] + fn __lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16; + #[link_name = 
"llvm.loongarch.lasx.xvshuf4i.d"] + fn __lasx_xvshuf4i_d(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvbsrl.v"] + fn __lasx_xvbsrl_v(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvbsll.v"] + fn __lasx_xvbsll_v(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvextrins.b"] + fn __lasx_xvextrins_b(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvextrins.h"] + fn __lasx_xvextrins_h(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvextrins.w"] + fn __lasx_xvextrins_w(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvextrins.d"] + fn __lasx_xvextrins_d(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmskltz.b"] + fn __lasx_xvmskltz_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmskltz.h"] + fn __lasx_xvmskltz_h(a: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmskltz.w"] + fn __lasx_xvmskltz_w(a: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmskltz.d"] + fn __lasx_xvmskltz_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsigncov.b"] + fn __lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsigncov.h"] + fn __lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsigncov.w"] + fn __lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsigncov.d"] + fn __lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfmadd.s"] + fn __lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmadd.d"] + fn __lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfmsub.s"] + fn __lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfmsub.d"] + fn __lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64; + #[link_name 
= "llvm.loongarch.lasx.xvfnmadd.s"] + fn __lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfnmadd.d"] + fn __lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfnmsub.s"] + fn __lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfnmsub.d"] + fn __lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvftintrne.w.s"] + fn __lasx_xvftintrne_w_s(a: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrne.l.d"] + fn __lasx_xvftintrne_l_d(a: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrp.w.s"] + fn __lasx_xvftintrp_w_s(a: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrp.l.d"] + fn __lasx_xvftintrp_l_d(a: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrm.w.s"] + fn __lasx_xvftintrm_w_s(a: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrm.l.d"] + fn __lasx_xvftintrm_l_d(a: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftint.w.d"] + fn __lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvffint.s.l"] + fn __lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvftintrz.w.d"] + fn __lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrp.w.d"] + fn __lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrm.w.d"] + fn __lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftintrne.w.d"] + fn __lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvftinth.l.s"] + fn __lasx_xvftinth_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintl.l.s"] + fn __lasx_xvftintl_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvffinth.d.w"] + fn __lasx_xvffinth_d_w(a: v8i32) -> v4f64; + #[link_name = 
"llvm.loongarch.lasx.xvffintl.d.w"] + fn __lasx_xvffintl_d_w(a: v8i32) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvftintrzh.l.s"] + fn __lasx_xvftintrzh_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrzl.l.s"] + fn __lasx_xvftintrzl_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrph.l.s"] + fn __lasx_xvftintrph_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrpl.l.s"] + fn __lasx_xvftintrpl_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrmh.l.s"] + fn __lasx_xvftintrmh_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrml.l.s"] + fn __lasx_xvftintrml_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrneh.l.s"] + fn __lasx_xvftintrneh_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvftintrnel.l.s"] + fn __lasx_xvftintrnel_l_s(a: v8f32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfrintrne.s"] + fn __lasx_xvfrintrne_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrintrne.d"] + fn __lasx_xvfrintrne_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfrintrz.s"] + fn __lasx_xvfrintrz_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrintrz.d"] + fn __lasx_xvfrintrz_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfrintrp.s"] + fn __lasx_xvfrintrp_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrintrp.d"] + fn __lasx_xvfrintrp_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvfrintrm.s"] + fn __lasx_xvfrintrm_s(a: v8f32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvfrintrm.d"] + fn __lasx_xvfrintrm_d(a: v4f64) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvld"] + fn __lasx_xvld(a: *const i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvst"] + fn __lasx_xvst(a: v32i8, b: *mut i8, c: i32) ; + #[link_name = "llvm.loongarch.lasx.xvstelm.b"] + fn __lasx_xvstelm_b(a: v32i8, b: *mut i8, c: i32, d: u32) ; + #[link_name = 
"llvm.loongarch.lasx.xvstelm.h"] + fn __lasx_xvstelm_h(a: v16i16, b: *mut i8, c: i32, d: u32) ; + #[link_name = "llvm.loongarch.lasx.xvstelm.w"] + fn __lasx_xvstelm_w(a: v8i32, b: *mut i8, c: i32, d: u32) ; + #[link_name = "llvm.loongarch.lasx.xvstelm.d"] + fn __lasx_xvstelm_d(a: v4i64, b: *mut i8, c: i32, d: u32) ; + #[link_name = "llvm.loongarch.lasx.xvinsve0.w"] + fn __lasx_xvinsve0_w(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvinsve0.d"] + fn __lasx_xvinsve0_d(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpickve.w"] + fn __lasx_xvpickve_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpickve.d"] + fn __lasx_xvpickve_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrlrn.b.h"] + fn __lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrlrn.h.w"] + fn __lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrlrn.w.d"] + fn __lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrln.b.h"] + fn __lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrln.h.w"] + fn __lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrln.w.d"] + fn __lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvorn.v"] + fn __lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvldi"] + fn __lasx_xvldi(a: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvldx"] + fn __lasx_xvldx(a: *const i8, b: i64) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvstx"] + fn __lasx_xvstx(a: v32i8, b: *mut i8, c: i64) ; + #[link_name = "llvm.loongarch.lasx.xvextl.qu.du"] + fn __lasx_xvextl_qu_du(a: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.w"] + fn __lasx_xvinsgr2vr_w(a: v8i32, b: i32, c: u32) -> v8i32; + #[link_name = 
"llvm.loongarch.lasx.xvinsgr2vr.d"] + fn __lasx_xvinsgr2vr_d(a: v4i64, b: i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvreplve0.b"] + fn __lasx_xvreplve0_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvreplve0.h"] + fn __lasx_xvreplve0_h(a: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvreplve0.w"] + fn __lasx_xvreplve0_w(a: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvreplve0.d"] + fn __lasx_xvreplve0_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvreplve0.q"] + fn __lasx_xvreplve0_q(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.vext2xv.h.b"] + fn __lasx_vext2xv_h_b(a: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.vext2xv.w.h"] + fn __lasx_vext2xv_w_h(a: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.vext2xv.d.w"] + fn __lasx_vext2xv_d_w(a: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.vext2xv.w.b"] + fn __lasx_vext2xv_w_b(a: v32i8) -> v8i32; + #[link_name = "llvm.loongarch.lasx.vext2xv.d.h"] + fn __lasx_vext2xv_d_h(a: v16i16) -> v4i64; + #[link_name = "llvm.loongarch.lasx.vext2xv.d.b"] + fn __lasx_vext2xv_d_b(a: v32i8) -> v4i64; + #[link_name = "llvm.loongarch.lasx.vext2xv.hu.bu"] + fn __lasx_vext2xv_hu_bu(a: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.vext2xv.wu.hu"] + fn __lasx_vext2xv_wu_hu(a: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.vext2xv.du.wu"] + fn __lasx_vext2xv_du_wu(a: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.vext2xv.wu.bu"] + fn __lasx_vext2xv_wu_bu(a: v32i8) -> v8i32; + #[link_name = "llvm.loongarch.lasx.vext2xv.du.hu"] + fn __lasx_vext2xv_du_hu(a: v16i16) -> v4i64; + #[link_name = "llvm.loongarch.lasx.vext2xv.du.bu"] + fn __lasx_vext2xv_du_bu(a: v32i8) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpermi.q"] + fn __lasx_xvpermi_q(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvpermi.d"] + fn __lasx_xvpermi_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvperm.w"] 
+ fn __lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvldrepl.b"] + fn __lasx_xvldrepl_b(a: *const i8, b: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvldrepl.h"] + fn __lasx_xvldrepl_h(a: *const i8, b: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvldrepl.w"] + fn __lasx_xvldrepl_w(a: *const i8, b: i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvldrepl.d"] + fn __lasx_xvldrepl_d(a: *const i8, b: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvpickve2gr.w"] + fn __lasx_xvpickve2gr_w(a: v8i32, b: u32) -> i32; + #[link_name = "llvm.loongarch.lasx.xvpickve2gr.wu"] + fn __lasx_xvpickve2gr_wu(a: v8i32, b: u32) -> u32; + #[link_name = "llvm.loongarch.lasx.xvpickve2gr.d"] + fn __lasx_xvpickve2gr_d(a: v4i64, b: u32) -> i64; + #[link_name = "llvm.loongarch.lasx.xvpickve2gr.du"] + fn __lasx_xvpickve2gr_du(a: v4i64, b: u32) -> u64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.q.d"] + fn __lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.d.w"] + fn __lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.w.h"] + fn __lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddwev.h.b"] + fn __lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvaddwev.q.du"] + fn __lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.d.wu"] + fn __lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.w.hu"] + fn __lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddwev.h.bu"] + fn __lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsubwev.q.d"] + fn __lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwev.d.w"] + fn __lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = 
"llvm.loongarch.lasx.xvsubwev.w.h"] + fn __lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsubwev.h.b"] + fn __lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsubwev.q.du"] + fn __lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwev.d.wu"] + fn __lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwev.w.hu"] + fn __lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsubwev.h.bu"] + fn __lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmulwev.q.d"] + fn __lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwev.d.w"] + fn __lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwev.w.h"] + fn __lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmulwev.h.b"] + fn __lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmulwev.q.du"] + fn __lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwev.d.wu"] + fn __lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwev.w.hu"] + fn __lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmulwev.h.bu"] + fn __lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvaddwod.q.d"] + fn __lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwod.d.w"] + fn __lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwod.w.h"] + fn __lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddwod.h.b"] + fn __lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvaddwod.q.du"] + fn 
__lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwod.d.wu"] + fn __lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwod.w.hu"] + fn __lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddwod.h.bu"] + fn __lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsubwod.q.d"] + fn __lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwod.d.w"] + fn __lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwod.w.h"] + fn __lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsubwod.h.b"] + fn __lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsubwod.q.du"] + fn __lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwod.d.wu"] + fn __lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsubwod.w.hu"] + fn __lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsubwod.h.bu"] + fn __lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmulwod.q.d"] + fn __lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwod.d.w"] + fn __lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwod.w.h"] + fn __lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmulwod.h.b"] + fn __lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmulwod.q.du"] + fn __lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwod.d.wu"] + fn __lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwod.w.hu"] + fn __lasx_xvmulwod_w_hu(a: v16u16, b: 
v16u16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmulwod.h.bu"] + fn __lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvaddwev.d.wu.w"] + fn __lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.w.hu.h"] + fn __lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddwev.h.bu.b"] + fn __lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmulwev.d.wu.w"] + fn __lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwev.w.hu.h"] + fn __lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmulwev.h.bu.b"] + fn __lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvaddwod.d.wu.w"] + fn __lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwod.w.hu.h"] + fn __lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvaddwod.h.bu.b"] + fn __lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmulwod.d.wu.w"] + fn __lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwod.w.hu.h"] + fn __lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmulwod.h.bu.b"] + fn __lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvhaddw.q.d"] + fn __lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvhaddw.qu.du"] + fn __lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvhsubw.q.d"] + fn __lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvhsubw.qu.du"] + fn __lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.d"] + fn 
__lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.w"] + fn __lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.h"] + fn __lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.b"] + fn __lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.du"] + fn __lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.wu"] + fn __lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.hu"] + fn __lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.bu"] + fn __lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.d"] + fn __lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.w"] + fn __lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.h"] + fn __lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.b"] + fn __lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.du"] + fn __lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.wu"] + fn __lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.hu"] + fn __lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.bu"] + fn __lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.du.d"] + fn __lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> 
v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.wu.w"] + fn __lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.hu.h"] + fn __lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.bu.b"] + fn __lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.du.d"] + fn __lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.wu.w"] + fn __lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.hu.h"] + fn __lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.bu.b"] + fn __lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvrotr.b"] + fn __lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvrotr.h"] + fn __lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvrotr.w"] + fn __lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvrotr.d"] + fn __lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvadd.q"] + fn __lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsub.q"] + fn __lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwev.q.du.d"] + fn __lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvaddwod.q.du.d"] + fn __lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwev.q.du.d"] + fn __lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmulwod.q.du.d"] + fn __lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvmskgez.b"] + fn 
__lasx_xvmskgez_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvmsknz.b"] + fn __lasx_xvmsknz_b(a: v32i8) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvexth.h.b"] + fn __lasx_xvexth_h_b(a: v32i8) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvexth.w.h"] + fn __lasx_xvexth_w_h(a: v16i16) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvexth.d.w"] + fn __lasx_xvexth_d_w(a: v8i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvexth.q.d"] + fn __lasx_xvexth_q_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvexth.hu.bu"] + fn __lasx_xvexth_hu_bu(a: v32u8) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvexth.wu.hu"] + fn __lasx_xvexth_wu_hu(a: v16u16) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvexth.du.wu"] + fn __lasx_xvexth_du_wu(a: v8u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvexth.qu.du"] + fn __lasx_xvexth_qu_du(a: v4u64) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvrotri.b"] + fn __lasx_xvrotri_b(a: v32i8, b: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvrotri.h"] + fn __lasx_xvrotri_h(a: v16i16, b: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvrotri.w"] + fn __lasx_xvrotri_w(a: v8i32, b: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvrotri.d"] + fn __lasx_xvrotri_d(a: v4i64, b: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvextl.q.d"] + fn __lasx_xvextl_q_d(a: v4i64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrlni.b.h"] + fn __lasx_xvsrlni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrlni.h.w"] + fn __lasx_xvsrlni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrlni.w.d"] + fn __lasx_xvsrlni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrlni.d.q"] + fn __lasx_xvsrlni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrlrni.b.h"] + fn __lasx_xvsrlrni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = 
"llvm.loongarch.lasx.xvsrlrni.h.w"] + fn __lasx_xvsrlrni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrlrni.w.d"] + fn __lasx_xvsrlrni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrlrni.d.q"] + fn __lasx_xvsrlrni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrlni.b.h"] + fn __lasx_xvssrlni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrlni.h.w"] + fn __lasx_xvssrlni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrlni.w.d"] + fn __lasx_xvssrlni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrlni.d.q"] + fn __lasx_xvssrlni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrlni.bu.h"] + fn __lasx_xvssrlni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrlni.hu.w"] + fn __lasx_xvssrlni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssrlni.wu.d"] + fn __lasx_xvssrlni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvssrlni.du.q"] + fn __lasx_xvssrlni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.b.h"] + fn __lasx_xvssrlrni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.h.w"] + fn __lasx_xvssrlrni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.w.d"] + fn __lasx_xvssrlrni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.d.q"] + fn __lasx_xvssrlrni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.bu.h"] + fn __lasx_xvssrlrni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.hu.w"] + fn __lasx_xvssrlrni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16; + #[link_name = 
"llvm.loongarch.lasx.xvssrlrni.wu.d"] + fn __lasx_xvssrlrni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvssrlrni.du.q"] + fn __lasx_xvssrlrni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvsrani.b.h"] + fn __lasx_xvsrani_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrani.h.w"] + fn __lasx_xvsrani_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrani.w.d"] + fn __lasx_xvsrani_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrani.d.q"] + fn __lasx_xvsrani_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvsrarni.b.h"] + fn __lasx_xvsrarni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvsrarni.h.w"] + fn __lasx_xvsrarni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvsrarni.w.d"] + fn __lasx_xvsrarni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvsrarni.d.q"] + fn __lasx_xvsrarni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrani.b.h"] + fn __lasx_xvssrani_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrani.h.w"] + fn __lasx_xvssrani_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrani.w.d"] + fn __lasx_xvssrani_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrani.d.q"] + fn __lasx_xvssrani_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrani.bu.h"] + fn __lasx_xvssrani_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrani.hu.w"] + fn __lasx_xvssrani_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssrani.wu.d"] + fn __lasx_xvssrani_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32; + #[link_name = 
"llvm.loongarch.lasx.xvssrani.du.q"] + fn __lasx_xvssrani_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xvssrarni.b.h"] + fn __lasx_xvssrarni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvssrarni.h.w"] + fn __lasx_xvssrarni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvssrarni.w.d"] + fn __lasx_xvssrarni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvssrarni.d.q"] + fn __lasx_xvssrarni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvssrarni.bu.h"] + fn __lasx_xvssrarni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8; + #[link_name = "llvm.loongarch.lasx.xvssrarni.hu.w"] + fn __lasx_xvssrarni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16; + #[link_name = "llvm.loongarch.lasx.xvssrarni.wu.d"] + fn __lasx_xvssrarni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32; + #[link_name = "llvm.loongarch.lasx.xvssrarni.du.q"] + fn __lasx_xvssrarni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64; + #[link_name = "llvm.loongarch.lasx.xbnz.b"] + fn __lasx_xbnz_b(a: v32u8) -> i32; + #[link_name = "llvm.loongarch.lasx.xbnz.d"] + fn __lasx_xbnz_d(a: v4u64) -> i32; + #[link_name = "llvm.loongarch.lasx.xbnz.h"] + fn __lasx_xbnz_h(a: v16u16) -> i32; + #[link_name = "llvm.loongarch.lasx.xbnz.v"] + fn __lasx_xbnz_v(a: v32u8) -> i32; + #[link_name = "llvm.loongarch.lasx.xbnz.w"] + fn __lasx_xbnz_w(a: v8u32) -> i32; + #[link_name = "llvm.loongarch.lasx.xbz.b"] + fn __lasx_xbz_b(a: v32u8) -> i32; + #[link_name = "llvm.loongarch.lasx.xbz.d"] + fn __lasx_xbz_d(a: v4u64) -> i32; + #[link_name = "llvm.loongarch.lasx.xbz.h"] + fn __lasx_xbz_h(a: v16u16) -> i32; + #[link_name = "llvm.loongarch.lasx.xbz.v"] + fn __lasx_xbz_v(a: v32u8) -> i32; + #[link_name = "llvm.loongarch.lasx.xbz.w"] + fn __lasx_xbz_w(a: v8u32) -> i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.caf.d"] + fn __lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = 
"llvm.loongarch.lasx.xvfcmp.caf.s"] + fn __lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.ceq.d"] + fn __lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.ceq.s"] + fn __lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cle.d"] + fn __lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cle.s"] + fn __lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.clt.d"] + fn __lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.clt.s"] + fn __lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cne.d"] + fn __lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cne.s"] + fn __lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cor.d"] + fn __lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cor.s"] + fn __lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cueq.d"] + fn __lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cueq.s"] + fn __lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cule.d"] + fn __lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cule.s"] + fn __lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cult.d"] + fn __lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cult.s"] + fn __lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cun.d"] + fn __lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cune.d"] + fn 
__lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cune.s"] + fn __lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.cun.s"] + fn __lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.saf.d"] + fn __lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.saf.s"] + fn __lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.seq.d"] + fn __lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.seq.s"] + fn __lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sle.d"] + fn __lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sle.s"] + fn __lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.slt.d"] + fn __lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.slt.s"] + fn __lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sne.d"] + fn __lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sne.s"] + fn __lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sor.d"] + fn __lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sor.s"] + fn __lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sueq.d"] + fn __lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sueq.s"] + fn __lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sule.d"] + fn __lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sule.s"] + fn __lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32; + 
#[link_name = "llvm.loongarch.lasx.xvfcmp.sult.d"] + fn __lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sult.s"] + fn __lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sun.d"] + fn __lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sune.d"] + fn __lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sune.s"] + fn __lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvfcmp.sun.s"] + fn __lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32; + #[link_name = "llvm.loongarch.lasx.xvpickve.d.f"] + fn __lasx_xvpickve_d_f(a: v4f64, b: u32) -> v4f64; + #[link_name = "llvm.loongarch.lasx.xvpickve.w.f"] + fn __lasx_xvpickve_w_f(a: v8f32, b: u32) -> v8f32; + #[link_name = "llvm.loongarch.lasx.xvrepli.b"] + fn __lasx_xvrepli_b(a: i32) -> v32i8; + #[link_name = "llvm.loongarch.lasx.xvrepli.d"] + fn __lasx_xvrepli_d(a: i32) -> v4i64; + #[link_name = "llvm.loongarch.lasx.xvrepli.h"] + fn __lasx_xvrepli_h(a: i32) -> v16i16; + #[link_name = "llvm.loongarch.lasx.xvrepli.w"] + fn __lasx_xvrepli_w(a: i32) -> v8i32; +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsll_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsll_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsll_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsll_d(a, 
b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslli_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvslli_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslli_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvslli_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslli_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslli_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslli_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvslli_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsra_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsra_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsra_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsra_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub 
unsafe fn lasx_xvsrai_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsrai_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrai_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrai_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrai_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrai_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrai_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrai_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsrar_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsrar_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsrar_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsrar_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrari_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsrari_b(a, IMM3) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrari_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrari_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrari_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrari_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrari_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrari_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsrl_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsrl_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsrl_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsrl_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrli_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsrli_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvsrli_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrli_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrli_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrli_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrli_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrli_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsrlr_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsrlr_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsrlr_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsrlr_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlri_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsrlri_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlri_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrlri_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = 
"lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlri_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrlri_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlri_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrlri_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvbitclr_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvbitclr_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvbitclr_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvbitclr_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclri_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvbitclri_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclri_h(a: v16u16) -> v16u16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvbitclri_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe 
fn lasx_xvbitclri_w(a: v8u32) -> v8u32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvbitclri_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitclri_d(a: v4u64) -> v4u64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvbitclri_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvbitset_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvbitset_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvbitset_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvbitset_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitseti_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvbitseti_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitseti_h(a: v16u16) -> v16u16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvbitseti_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitseti_w(a: v8u32) -> v8u32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvbitseti_w(a, IMM5) +} 
+ +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitseti_d(a: v4u64) -> v4u64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvbitseti_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvbitrev_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvbitrev_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvbitrev_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvbitrev_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrevi_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvbitrevi_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrevi_h(a: v16u16) -> v16u16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvbitrevi_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrevi_w(a: v8u32) -> v8u32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvbitrevi_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitrevi_d(a: v4u64) -> v4u64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvbitrevi_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvadd_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvadd_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvadd_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvadd_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvaddi_bu(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvaddi_bu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvaddi_hu(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvaddi_hu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvaddi_wu(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvaddi_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvaddi_du(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM5, 5); + 
__lasx_xvaddi_du(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsub_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsub_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsub_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsub_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsubi_bu(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsubi_bu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsubi_hu(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsubi_hu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsubi_wu(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsubi_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsubi_du(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsubi_du(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub 
unsafe fn lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvmax_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvmax_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvmax_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvmax_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_b(a: v32i8) -> v32i8 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmaxi_b(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_h(a: v16i16) -> v16i16 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmaxi_h(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_w(a: v8i32) -> v8i32 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmaxi_w(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_d(a: v4i64) -> v4i64 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmaxi_d(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvmax_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvmax_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvmax_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvmax_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_bu(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmaxi_bu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_hu(a: v16u16) -> v16u16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmaxi_hu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_wu(a: v8u32) -> v8u32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmaxi_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaxi_du(a: v4u64) -> v4u64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmaxi_du(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvmin_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvmin_h(a, 
b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvmin_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvmin_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_b(a: v32i8) -> v32i8 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmini_b(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_h(a: v16i16) -> v16i16 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmini_h(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_w(a: v8i32) -> v8i32 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmini_w(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_d(a: v4i64) -> v4i64 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvmini_d(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvmin_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvmin_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvmin_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvmin_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_bu(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmini_bu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_hu(a: v16u16) -> v16u16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmini_hu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_wu(a: v8u32) -> v8u32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmini_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmini_du(a: v4u64) -> v4u64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvmini_du(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvseq_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvseq_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvseq_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvseq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseqi_b(a: v32i8) -> v32i8 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvseqi_b(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseqi_h(a: v16i16) -> v16i16 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvseqi_h(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseqi_w(a: v8i32) -> v8i32 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvseqi_w(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvseqi_d(a: v4i64) -> v4i64 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvseqi_d(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvslt_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvslt_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvslt_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvslt_d(a, b) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_b(a: v32i8) -> v32i8 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslti_b(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_h(a: v16i16) -> v16i16 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslti_h(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_w(a: v8i32) -> v8i32 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslti_w(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_d(a: v4i64) -> v4i64 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslti_d(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8 { + __lasx_xvslt_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16 { + __lasx_xvslt_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32 { + __lasx_xvslt_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64 { + __lasx_xvslt_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] 
+pub unsafe fn lasx_xvslti_bu(a: v32u8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslti_bu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_hu(a: v16u16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslti_hu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_wu(a: v8u32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslti_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslti_du(a: v4u64) -> v4i64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslti_du(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsle_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsle_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsle_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsle_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_b(a: v32i8) -> v32i8 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslei_b(a, IMM_S5) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_h(a: v16i16) -> v16i16 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslei_h(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_w(a: v8i32) -> v8i32 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslei_w(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_d(a: v4i64) -> v4i64 { + static_assert_simm_bits!(IMM_S5, 5); + __lasx_xvslei_d(a, IMM_S5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8 { + __lasx_xvsle_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16 { + __lasx_xvsle_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32 { + __lasx_xvsle_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64 { + __lasx_xvsle_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_bu(a: v32u8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslei_bu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] 
+pub unsafe fn lasx_xvslei_hu(a: v16u16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslei_hu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_wu(a: v8u32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslei_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvslei_du(a: v4u64) -> v4i64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvslei_du(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsat_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsat_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsat_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsat_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_bu(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsat_bu(a, IMM3) +} + +#[inline] +#[target_feature(enable = 
"lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_hu(a: v16u16) -> v16u16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsat_hu(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_wu(a: v8u32) -> v8u32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsat_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsat_du(a: v4u64) -> v4u64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsat_du(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvadda_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvadda_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvadda_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvadda_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsadd_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsadd_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsadd_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsadd_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvsadd_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvsadd_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvsadd_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvsadd_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvavg_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvavg_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvavg_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvavg_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvavg_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvavg_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvavg_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvavg_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvavgr_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvavgr_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvavgr_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvavgr_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvavgr_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvavgr_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvavgr_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvavgr_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvssub_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvssub_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvssub_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvssub_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvssub_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvssub_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvssub_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvssub_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvabsd_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvabsd_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvabsd_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvabsd_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvabsd_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvabsd_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvabsd_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvabsd_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvmul_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvmul_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvmul_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvmul_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + __lasx_xvmadd_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + __lasx_xvmadd_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { + __lasx_xvmadd_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + __lasx_xvmadd_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + __lasx_xvmsub_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + __lasx_xvmsub_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { + __lasx_xvmsub_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) 
-> v4i64 { + __lasx_xvmsub_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvdiv_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvdiv_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvdiv_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvdiv_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvdiv_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvdiv_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvdiv_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvdiv_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16 { + __lasx_xvhaddw_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> 
v8i32 { + __lasx_xvhaddw_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64 { + __lasx_xvhaddw_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16 { + __lasx_xvhaddw_hu_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32 { + __lasx_xvhaddw_wu_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64 { + __lasx_xvhaddw_du_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16 { + __lasx_xvhsubw_h_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32 { + __lasx_xvhsubw_w_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64 { + __lasx_xvhsubw_d_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16 { + __lasx_xvhsubw_hu_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32 { + __lasx_xvhsubw_wu_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", 
issue = "117427")] +pub unsafe fn lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64 { + __lasx_xvhsubw_du_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvmod_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvmod_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvmod_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvmod_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvmod_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvmod_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvmod_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvmod_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepl128vei_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvrepl128vei_b(a, IMM4) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepl128vei_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvrepl128vei_h(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepl128vei_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM2, 2); + __lasx_xvrepl128vei_w(a, IMM2) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepl128vei_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM1, 1); + __lasx_xvrepl128vei_d(a, IMM1) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvpickev_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvpickev_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvpickev_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvpickev_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvpickod_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickod_h(a: v16i16, b: v16i16) -> 
v16i16 { + __lasx_xvpickod_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvpickod_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvpickod_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvilvh_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvilvh_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvilvh_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvilvh_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvilvl_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvilvl_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvilvl_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvilvl_d(a: v4i64, b: v4i64) -> 
v4i64 { + __lasx_xvilvl_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvpackev_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvpackev_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvpackev_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvpackev_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvpackod_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvpackod_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvpackod_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvpackod_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + __lasx_xvshuf_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + __lasx_xvshuf_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { + __lasx_xvshuf_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + __lasx_xvshuf_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvand_v(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvandi_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvandi_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvor_v(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvori_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvori_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvnor_v(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvnori_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvnori_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub 
unsafe fn lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvxor_v(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvxori_b(a: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvxori_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8 { + __lasx_xvbitsel_v(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbitseli_b(a: v32u8, b: v32u8) -> v32u8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvbitseli_b(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf4i_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvshuf4i_b(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf4i_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvshuf4i_h(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf4i_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvshuf4i_w(a, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplgr2vr_b(a: i32) -> v32i8 { + __lasx_xvreplgr2vr_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplgr2vr_h(a: i32) -> v16i16 
{ + __lasx_xvreplgr2vr_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplgr2vr_w(a: i32) -> v8i32 { + __lasx_xvreplgr2vr_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplgr2vr_d(a: i64) -> v4i64 { + __lasx_xvreplgr2vr_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpcnt_b(a: v32i8) -> v32i8 { + __lasx_xvpcnt_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpcnt_h(a: v16i16) -> v16i16 { + __lasx_xvpcnt_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpcnt_w(a: v8i32) -> v8i32 { + __lasx_xvpcnt_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpcnt_d(a: v4i64) -> v4i64 { + __lasx_xvpcnt_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclo_b(a: v32i8) -> v32i8 { + __lasx_xvclo_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclo_h(a: v16i16) -> v16i16 { + __lasx_xvclo_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclo_w(a: v8i32) -> v8i32 { + __lasx_xvclo_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclo_d(a: v4i64) -> v4i64 { + __lasx_xvclo_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] +pub unsafe fn lasx_xvclz_b(a: v32i8) -> v32i8 { + __lasx_xvclz_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclz_h(a: v16i16) -> v16i16 { + __lasx_xvclz_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclz_w(a: v8i32) -> v8i32 { + __lasx_xvclz_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvclz_d(a: v4i64) -> v4i64 { + __lasx_xvclz_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32 { + __lasx_xvfadd_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfadd_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32 { + __lasx_xvfsub_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfsub_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32 { + __lasx_xvfmul_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfmul_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32 { + 
__lasx_xvfdiv_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfdiv_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16 { + __lasx_xvfcvt_h_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32 { + __lasx_xvfcvt_s_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32 { + __lasx_xvfmin_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfmin_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32 { + __lasx_xvfmina_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfmina_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32 { + __lasx_xvfmax_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfmax_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32 { + 
__lasx_xvfmaxa_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64 { + __lasx_xvfmaxa_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfclass_s(a: v8f32) -> v8i32 { + __lasx_xvfclass_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfclass_d(a: v4f64) -> v4i64 { + __lasx_xvfclass_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfsqrt_s(a: v8f32) -> v8f32 { + __lasx_xvfsqrt_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfsqrt_d(a: v4f64) -> v4f64 { + __lasx_xvfsqrt_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrecip_s(a: v8f32) -> v8f32 { + __lasx_xvfrecip_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrecip_d(a: v4f64) -> v4f64 { + __lasx_xvfrecip_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrint_s(a: v8f32) -> v8f32 { + __lasx_xvfrint_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrint_d(a: v4f64) -> v4f64 { + __lasx_xvfrint_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrsqrt_s(a: v8f32) -> v8f32 { + __lasx_xvfrsqrt_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrsqrt_d(a: v4f64) -> v4f64 { + __lasx_xvfrsqrt_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvflogb_s(a: v8f32) -> v8f32 { + __lasx_xvflogb_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvflogb_d(a: v4f64) -> v4f64 { + __lasx_xvflogb_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcvth_s_h(a: v16i16) -> v8f32 { + __lasx_xvfcvth_s_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcvth_d_s(a: v8f32) -> v4f64 { + __lasx_xvfcvth_d_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcvtl_s_h(a: v16i16) -> v8f32 { + __lasx_xvfcvtl_s_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcvtl_d_s(a: v8f32) -> v4f64 { + __lasx_xvfcvtl_d_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftint_w_s(a: v8f32) -> v8i32 { + __lasx_xvftint_w_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftint_l_d(a: v4f64) -> v4i64 { + __lasx_xvftint_l_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftint_wu_s(a: v8f32) -> v8u32 { + __lasx_xvftint_wu_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftint_lu_d(a: v4f64) -> v4u64 { + 
__lasx_xvftint_lu_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrz_w_s(a: v8f32) -> v8i32 { + __lasx_xvftintrz_w_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrz_l_d(a: v4f64) -> v4i64 { + __lasx_xvftintrz_l_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrz_wu_s(a: v8f32) -> v8u32 { + __lasx_xvftintrz_wu_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrz_lu_d(a: v4f64) -> v4u64 { + __lasx_xvftintrz_lu_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffint_s_w(a: v8i32) -> v8f32 { + __lasx_xvffint_s_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffint_d_l(a: v4i64) -> v4f64 { + __lasx_xvffint_d_l(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffint_s_wu(a: v8u32) -> v8f32 { + __lasx_xvffint_s_wu(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffint_d_lu(a: v4u64) -> v4f64 { + __lasx_xvffint_d_lu(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8 { + __lasx_xvreplve_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16 { + __lasx_xvreplve_h(a, b) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32 { + __lasx_xvreplve_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64 { + __lasx_xvreplve_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpermi_w(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvpermi_w(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvandn_v(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvneg_b(a: v32i8) -> v32i8 { + __lasx_xvneg_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvneg_h(a: v16i16) -> v16i16 { + __lasx_xvneg_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvneg_w(a: v8i32) -> v8i32 { + __lasx_xvneg_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvneg_d(a: v4i64) -> v4i64 { + __lasx_xvneg_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvmuh_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvmuh_h(a, b) +} + 
+#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvmuh_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvmuh_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8 { + __lasx_xvmuh_bu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16 { + __lasx_xvmuh_hu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32 { + __lasx_xvmuh_wu(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64 { + __lasx_xvmuh_du(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsllwil_h_b(a: v32i8) -> v16i16 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsllwil_h_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsllwil_w_h(a: v16i16) -> v8i32 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsllwil_w_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsllwil_d_w(a: v8i32) -> v4i64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsllwil_d_w(a, IMM5) +} 
+ +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsllwil_hu_bu(a: v32u8) -> v16u16 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvsllwil_hu_bu(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsllwil_wu_hu(a: v16u16) -> v8u32 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsllwil_wu_hu(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsllwil_du_wu(a: v8u32) -> v4u64 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsllwil_du_wu(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvsran_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvsran_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvsran_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvssran_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvssran_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssran_w_d(a: v4i64, 
b: v4i64) -> v8i32 { + __lasx_xvssran_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8 { + __lasx_xvssran_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16 { + __lasx_xvssran_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32 { + __lasx_xvssran_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvsrarn_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvsrarn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvsrarn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvssrarn_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvssrarn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvssrarn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", 
issue = "117427")] +pub unsafe fn lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8 { + __lasx_xvssrarn_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16 { + __lasx_xvssrarn_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32 { + __lasx_xvssrarn_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvsrln_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvsrln_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvsrln_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8 { + __lasx_xvssrln_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16 { + __lasx_xvssrln_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32 { + __lasx_xvssrln_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvsrlrn_b_h(a, b) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvsrlrn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvsrlrn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8 { + __lasx_xvssrlrn_bu_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16 { + __lasx_xvssrlrn_hu_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32 { + __lasx_xvssrlrn_wu_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrstpi_b(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvfrstpi_b(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrstpi_h(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvfrstpi_h(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + __lasx_xvfrstp_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + 
__lasx_xvfrstp_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvshuf4i_d(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvshuf4i_d(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbsrl_v(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvbsrl_v(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvbsll_v(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvbsll_v(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvextrins_b(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvextrins_b(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvextrins_h(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvextrins_h(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvextrins_w(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvextrins_w(a, b, IMM8) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvextrins_d(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM8, 8); + __lasx_xvextrins_d(a, b, IMM8) +} + +#[inline] 
+#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmskltz_b(a: v32i8) -> v32i8 { + __lasx_xvmskltz_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmskltz_h(a: v16i16) -> v16i16 { + __lasx_xvmskltz_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmskltz_w(a: v8i32) -> v8i32 { + __lasx_xvmskltz_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmskltz_d(a: v4i64) -> v4i64 { + __lasx_xvmskltz_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvsigncov_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvsigncov_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvsigncov_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsigncov_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + __lasx_xvfmadd_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + __lasx_xvfmadd_d(a, b, c) +} + 
+#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + __lasx_xvfmsub_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + __lasx_xvfmsub_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + __lasx_xvfnmadd_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + __lasx_xvfnmadd_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + __lasx_xvfnmsub_s(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + __lasx_xvfnmsub_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrne_w_s(a: v8f32) -> v8i32 { + __lasx_xvftintrne_w_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrne_l_d(a: v4f64) -> v4i64 { + __lasx_xvftintrne_l_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrp_w_s(a: v8f32) -> v8i32 { + __lasx_xvftintrp_w_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvftintrp_l_d(a: v4f64) -> v4i64 { + __lasx_xvftintrp_l_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrm_w_s(a: v8f32) -> v8i32 { + __lasx_xvftintrm_w_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrm_l_d(a: v4f64) -> v4i64 { + __lasx_xvftintrm_l_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32 { + __lasx_xvftint_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32 { + __lasx_xvffint_s_l(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32 { + __lasx_xvftintrz_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32 { + __lasx_xvftintrp_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32 { + __lasx_xvftintrm_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32 { + __lasx_xvftintrne_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftinth_l_s(a: v8f32) -> v4i64 { + __lasx_xvftinth_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] +pub unsafe fn lasx_xvftintl_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintl_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffinth_d_w(a: v8i32) -> v4f64 { + __lasx_xvffinth_d_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvffintl_d_w(a: v8i32) -> v4f64 { + __lasx_xvffintl_d_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrzh_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrzh_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrzl_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrzl_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrph_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrph_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrpl_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrpl_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrmh_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrmh_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrml_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrml_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvftintrneh_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrneh_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvftintrnel_l_s(a: v8f32) -> v4i64 { + __lasx_xvftintrnel_l_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrne_s(a: v8f32) -> v8f32 { + __lasx_xvfrintrne_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrne_d(a: v4f64) -> v4f64 { + __lasx_xvfrintrne_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrz_s(a: v8f32) -> v8f32 { + __lasx_xvfrintrz_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrz_d(a: v4f64) -> v4f64 { + __lasx_xvfrintrz_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrp_s(a: v8f32) -> v8f32 { + __lasx_xvfrintrp_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrp_d(a: v4f64) -> v4f64 { + __lasx_xvfrintrp_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrm_s(a: v8f32) -> v8f32 { + __lasx_xvfrintrm_s(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfrintrm_d(a: v4f64) -> v4f64 { + __lasx_xvfrintrm_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvld(mem_addr: *const i8) -> v32i8 { + static_assert_simm_bits!(IMM_S12, 12); + __lasx_xvld(mem_addr, IMM_S12) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvst(a: v32i8, mem_addr: *mut i8) { + static_assert_simm_bits!(IMM_S12, 12); + __lasx_xvst(a, mem_addr, IMM_S12) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvstelm_b(a: v32i8, mem_addr: *mut i8) { + static_assert_simm_bits!(IMM_S8, 8); + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvstelm_b(a, mem_addr, IMM_S8, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvstelm_h(a: v16i16, mem_addr: *mut i8) { + static_assert_simm_bits!(IMM_S8, 8); + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvstelm_h(a, mem_addr, IMM_S8, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvstelm_w(a: v8i32, mem_addr: *mut i8) { + static_assert_simm_bits!(IMM_S8, 8); + static_assert_uimm_bits!(IMM2, 2); + __lasx_xvstelm_w(a, mem_addr, IMM_S8, IMM2) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvstelm_d(a: v4i64, mem_addr: *mut i8) { + static_assert_simm_bits!(IMM_S8, 8); + static_assert_uimm_bits!(IMM1, 1); + __lasx_xvstelm_d(a, mem_addr, IMM_S8, IMM1) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvinsve0_w(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvinsve0_w(a, b, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvinsve0_d(a: v4i64, b: v4i64) -> 
v4i64 { + static_assert_uimm_bits!(IMM2, 2); + __lasx_xvinsve0_d(a, b, IMM2) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickve_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvpickve_w(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickve_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM2, 2); + __lasx_xvpickve_d(a, IMM2) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvssrlrn_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvssrlrn_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvssrlrn_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8 { + __lasx_xvssrln_b_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16 { + __lasx_xvssrln_h_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32 { + __lasx_xvssrln_w_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvorn_v(a: v32i8, 
b: v32i8) -> v32i8 { + __lasx_xvorn_v(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvldi() -> v4i64 { + static_assert_simm_bits!(IMM_S13, 13); + __lasx_xvldi(IMM_S13) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> v32i8 { + __lasx_xvldx(mem_addr, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvstx(a: v32i8, mem_addr: *mut i8, b: i64) { + __lasx_xvstx(a, mem_addr, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvextl_qu_du(a: v4u64) -> v4u64 { + __lasx_xvextl_qu_du(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvinsgr2vr_w(a: v8i32, b: i32) -> v8i32 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvinsgr2vr_w(a, b, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvinsgr2vr_d(a: v4i64, b: i64) -> v4i64 { + static_assert_uimm_bits!(IMM2, 2); + __lasx_xvinsgr2vr_d(a, b, IMM2) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve0_b(a: v32i8) -> v32i8 { + __lasx_xvreplve0_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve0_h(a: v16i16) -> v16i16 { + __lasx_xvreplve0_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvreplve0_w(a: v8i32) -> v8i32 { + __lasx_xvreplve0_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve0_d(a: v4i64) -> v4i64 { + __lasx_xvreplve0_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvreplve0_q(a: v32i8) -> v32i8 { + __lasx_xvreplve0_q(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_h_b(a: v32i8) -> v16i16 { + __lasx_vext2xv_h_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_w_h(a: v16i16) -> v8i32 { + __lasx_vext2xv_w_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_d_w(a: v8i32) -> v4i64 { + __lasx_vext2xv_d_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_w_b(a: v32i8) -> v8i32 { + __lasx_vext2xv_w_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_d_h(a: v16i16) -> v4i64 { + __lasx_vext2xv_d_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_d_b(a: v32i8) -> v4i64 { + __lasx_vext2xv_d_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_hu_bu(a: v32i8) -> v16i16 { + __lasx_vext2xv_hu_bu(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_vext2xv_wu_hu(a: v16i16) -> v8i32 { + __lasx_vext2xv_wu_hu(a) +} + 
// NOTE(review): restored from the extraction-garbled diff text (stray `+`
// markers removed; the stripped `<const ...>` generic parameter lists on the
// `#[rustc_legacy_const_generics]` wrappers re-added, as witnessed by the
// `static_assert_*!` immediates each body already checks). The trailing
// partial signature of `lasx_xvmaddwod_q_du` continues past this chunk and
// is preserved verbatim.

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_vext2xv_du_wu(a: v8i32) -> v4i64 {
    __lasx_vext2xv_du_wu(a)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_vext2xv_wu_bu(a: v32i8) -> v8i32 {
    __lasx_vext2xv_wu_bu(a)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_vext2xv_du_hu(a: v16i16) -> v4i64 {
    __lasx_vext2xv_du_hu(a)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_vext2xv_du_bu(a: v32i8) -> v4i64 {
    __lasx_vext2xv_du_bu(a)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvpermi_q<const IMM8: u32>(a: v32i8, b: v32i8) -> v32i8 {
    static_assert_uimm_bits!(IMM8, 8);
    __lasx_xvpermi_q(a, b, IMM8)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvpermi_d<const IMM8: u32>(a: v4i64) -> v4i64 {
    static_assert_uimm_bits!(IMM8, 8);
    __lasx_xvpermi_d(a, IMM8)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32 {
    __lasx_xvperm_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> v32i8 {
    static_assert_simm_bits!(IMM_S12, 12);
    __lasx_xvldrepl_b(mem_addr, IMM_S12)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> v16i16 {
    static_assert_simm_bits!(IMM_S11, 11);
    __lasx_xvldrepl_h(mem_addr, IMM_S11)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> v8i32 {
    static_assert_simm_bits!(IMM_S10, 10);
    __lasx_xvldrepl_w(mem_addr, IMM_S10)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> v4i64 {
    static_assert_simm_bits!(IMM_S9, 9);
    __lasx_xvldrepl_d(mem_addr, IMM_S9)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvpickve2gr_w<const IMM3: u32>(a: v8i32) -> i32 {
    static_assert_uimm_bits!(IMM3, 3);
    __lasx_xvpickve2gr_w(a, IMM3)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvpickve2gr_wu<const IMM3: u32>(a: v8i32) -> u32 {
    static_assert_uimm_bits!(IMM3, 3);
    __lasx_xvpickve2gr_wu(a, IMM3)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvpickve2gr_d<const IMM2: u32>(a: v4i64) -> i64 {
    static_assert_uimm_bits!(IMM2, 2);
    __lasx_xvpickve2gr_d(a, IMM2)
}

#[inline]
#[target_feature(enable = "lasx")]
#[rustc_legacy_const_generics(1)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvpickve2gr_du<const IMM2: u32>(a: v4i64) -> u64 {
    static_assert_uimm_bits!(IMM2, 2);
    __lasx_xvpickve2gr_du(a, IMM2)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvaddwev_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
    __lasx_xvaddwev_d_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
    __lasx_xvaddwev_w_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
    __lasx_xvaddwev_h_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
    __lasx_xvaddwev_q_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
    __lasx_xvaddwev_d_wu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
    __lasx_xvaddwev_w_hu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
    __lasx_xvaddwev_h_bu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvsubwev_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
    __lasx_xvsubwev_d_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
    __lasx_xvsubwev_w_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
    __lasx_xvsubwev_h_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
    __lasx_xvsubwev_q_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
    __lasx_xvsubwev_d_wu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
    __lasx_xvsubwev_w_hu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
    __lasx_xvsubwev_h_bu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvmulwev_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
    __lasx_xvmulwev_d_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
    __lasx_xvmulwev_w_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
    __lasx_xvmulwev_h_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
    __lasx_xvmulwev_q_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
    __lasx_xvmulwev_d_wu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
    __lasx_xvmulwev_w_hu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
    __lasx_xvmulwev_h_bu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvaddwod_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
    __lasx_xvaddwod_d_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
    __lasx_xvaddwod_w_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
    __lasx_xvaddwod_h_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
    __lasx_xvaddwod_q_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
    __lasx_xvaddwod_d_wu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
    __lasx_xvaddwod_w_hu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
    __lasx_xvaddwod_h_bu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvsubwod_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
    __lasx_xvsubwod_d_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
    __lasx_xvsubwod_w_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
    __lasx_xvsubwod_h_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
    __lasx_xvsubwod_q_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
    __lasx_xvsubwod_d_wu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
    __lasx_xvsubwod_w_hu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
    __lasx_xvsubwod_h_bu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvmulwod_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
    __lasx_xvmulwod_d_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
    __lasx_xvmulwod_w_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
    __lasx_xvmulwod_h_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
    __lasx_xvmulwod_q_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
    __lasx_xvmulwod_d_wu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
    __lasx_xvmulwod_w_hu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
    __lasx_xvmulwod_h_bu(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
    __lasx_xvaddwev_d_wu_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
    __lasx_xvaddwev_w_hu_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
    __lasx_xvaddwev_h_bu_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
    __lasx_xvmulwev_d_wu_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
    __lasx_xvmulwev_w_hu_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
    __lasx_xvmulwev_h_bu_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
    __lasx_xvaddwod_d_wu_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
    __lasx_xvaddwod_w_hu_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
    __lasx_xvaddwod_h_bu_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
    __lasx_xvmulwod_d_wu_w(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
    __lasx_xvmulwod_w_hu_h(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
    __lasx_xvmulwod_h_bu_b(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvhaddw_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64 {
    __lasx_xvhaddw_qu_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64 {
    __lasx_xvhsubw_q_d(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64 {
    __lasx_xvhsubw_qu_du(a, b)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
    __lasx_xvmaddwev_q_d(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 {
    __lasx_xvmaddwev_d_w(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 {
    __lasx_xvmaddwev_w_h(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 {
    __lasx_xvmaddwev_h_b(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 {
    __lasx_xvmaddwev_q_du(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 {
    __lasx_xvmaddwev_d_wu(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 {
    __lasx_xvmaddwev_w_hu(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 {
    __lasx_xvmaddwev_h_bu(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
    __lasx_xvmaddwod_q_d(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 {
    __lasx_xvmaddwod_d_w(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 {
    __lasx_xvmaddwod_w_h(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 {
    __lasx_xvmaddwod_h_b(a, b, c)
}

#[inline]
#[target_feature(enable = "lasx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub unsafe fn lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c:
v4u64) -> v4u64 { + __lasx_xvmaddwod_q_du(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 { + __lasx_xvmaddwod_d_wu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 { + __lasx_xvmaddwod_w_hu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 { + __lasx_xvmaddwod_h_bu(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 { + __lasx_xvmaddwev_q_du_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 { + __lasx_xvmaddwev_d_wu_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 { + __lasx_xvmaddwev_w_hu_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 { + __lasx_xvmaddwev_h_bu_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 { + __lasx_xvmaddwod_q_du_d(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 { + __lasx_xvmaddwod_d_wu_w(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 { + __lasx_xvmaddwod_w_hu_h(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 { + __lasx_xvmaddwod_h_bu_b(a, b, c) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8 { + __lasx_xvrotr_b(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16 { + __lasx_xvrotr_h(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32 { + __lasx_xvrotr_w(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvrotr_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvadd_q(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64 { + __lasx_xvsub_q(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + __lasx_xvaddwev_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = 
"lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + __lasx_xvaddwod_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + __lasx_xvmulwev_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + __lasx_xvmulwod_q_du_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmskgez_b(a: v32i8) -> v32i8 { + __lasx_xvmskgez_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvmsknz_b(a: v32i8) -> v32i8 { + __lasx_xvmsknz_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_h_b(a: v32i8) -> v16i16 { + __lasx_xvexth_h_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_w_h(a: v16i16) -> v8i32 { + __lasx_xvexth_w_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_d_w(a: v8i32) -> v4i64 { + __lasx_xvexth_d_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_q_d(a: v4i64) -> v4i64 { + __lasx_xvexth_q_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_hu_bu(a: v32u8) -> v16u16 { + __lasx_xvexth_hu_bu(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_wu_hu(a: v16u16) -> v8u32 { + __lasx_xvexth_wu_hu(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_du_wu(a: v8u32) -> v4u64 { + __lasx_xvexth_du_wu(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvexth_qu_du(a: v4u64) -> v4u64 { + __lasx_xvexth_qu_du(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotri_b(a: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvrotri_b(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotri_h(a: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvrotri_h(a, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotri_w(a: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvrotri_w(a, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrotri_d(a: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvrotri_d(a, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvextl_q_d(a: v4i64) -> v4i64 { + __lasx_xvextl_q_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlni_b_h(a: v32i8, b: v32i8) -> v32i8 { + 
static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrlni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlni_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrlni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlni_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrlni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlni_d_q(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvsrlni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrni_b_h(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrlrni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrni_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrlrni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrni_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrlrni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrlrni_d_q(a: v4i64, b: v4i64) -> v4i64 { + 
static_assert_uimm_bits!(IMM7, 7); + __lasx_xvsrlrni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_b_h(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrlni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrlni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrlni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_d_q(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrlni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_bu_h(a: v32u8, b: v32i8) -> v32u8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrlni_bu_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_hu_w(a: v16u16, b: v16i16) -> v16u16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrlni_hu_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_wu_d(a: v8u32, b: v8i32) -> v8u32 { + 
static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrlni_wu_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlni_du_q(a: v4u64, b: v4i64) -> v4u64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrlni_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_b_h(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrlrni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrlrni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrlrni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_d_q(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrlrni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_bu_h(a: v32u8, b: v32i8) -> v32u8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrlrni_bu_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_hu_w(a: v16u16, b: v16i16) -> 
v16u16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrlrni_hu_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_wu_d(a: v8u32, b: v8i32) -> v8u32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrlrni_wu_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrlrni_du_q(a: v4u64, b: v4i64) -> v4u64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrlrni_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrani_b_h(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrani_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrani_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrani_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrani_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrani_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrani_d_q(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvsrani_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarni_b_h(a: v32i8, b: v32i8) -> v32i8 
{ + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvsrarni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarni_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvsrarni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarni_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvsrarni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvsrarni_d_q(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvsrarni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_b_h(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrani_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrani_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_w_d(a: v8i32, b: v8i32) -> v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrani_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_d_q(a: v4i64, b: v4i64) -> v4i64 { + 
static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrani_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_bu_h(a: v32u8, b: v32i8) -> v32u8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrani_bu_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_hu_w(a: v16u16, b: v16i16) -> v16u16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrani_hu_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_wu_d(a: v8u32, b: v8i32) -> v8u32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrani_wu_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrani_du_q(a: v4u64, b: v4i64) -> v4u64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrani_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_b_h(a: v32i8, b: v32i8) -> v32i8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrarni_b_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_h_w(a: v16i16, b: v16i16) -> v16i16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrarni_h_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_w_d(a: v8i32, b: v8i32) -> 
v8i32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrarni_w_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_d_q(a: v4i64, b: v4i64) -> v4i64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrarni_d_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_bu_h(a: v32u8, b: v32i8) -> v32u8 { + static_assert_uimm_bits!(IMM4, 4); + __lasx_xvssrarni_bu_h(a, b, IMM4) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_hu_w(a: v16u16, b: v16i16) -> v16u16 { + static_assert_uimm_bits!(IMM5, 5); + __lasx_xvssrarni_hu_w(a, b, IMM5) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_wu_d(a: v8u32, b: v8i32) -> v8u32 { + static_assert_uimm_bits!(IMM6, 6); + __lasx_xvssrarni_wu_d(a, b, IMM6) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvssrarni_du_q(a: v4u64, b: v4i64) -> v4u64 { + static_assert_uimm_bits!(IMM7, 7); + __lasx_xvssrarni_du_q(a, b, IMM7) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbnz_b(a: v32u8) -> i32 { + __lasx_xbnz_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbnz_d(a: v4u64) -> i32 { + __lasx_xbnz_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub 
unsafe fn lasx_xbnz_h(a: v16u16) -> i32 { + __lasx_xbnz_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbnz_v(a: v32u8) -> i32 { + __lasx_xbnz_v(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbnz_w(a: v8u32) -> i32 { + __lasx_xbnz_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbz_b(a: v32u8) -> i32 { + __lasx_xbz_b(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbz_d(a: v4u64) -> i32 { + __lasx_xbz_d(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbz_h(a: v16u16) -> i32 { + __lasx_xbz_h(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbz_v(a: v32u8) -> i32 { + __lasx_xbz_v(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xbz_w(a: v8u32) -> i32 { + __lasx_xbz_w(a) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_caf_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_caf_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_ceq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_ceq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cle_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cle_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_clt_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_clt_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cne_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cne_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cor_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cor_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cueq_d(a, b) +} + 
+#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cueq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cule_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cule_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cult_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cult_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cun_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_cune_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cune_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_cun_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn 
lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_saf_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_saf_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_seq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_seq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sle_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sle_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_slt_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_slt_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sne_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sne_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] 
+#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sor_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sor_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sueq_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sueq_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sule_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sule_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sult_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sult_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64 { + __lasx_xvfcmp_sun_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64 { + 
__lasx_xvfcmp_sune_d(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sune_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32 { + __lasx_xvfcmp_sun_s(a, b) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickve_d_f<const IMM2: u32>(a: v4f64) -> v4f64 { + static_assert_uimm_bits!(IMM2, 2); + __lasx_xvpickve_d_f(a, IMM2) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvpickve_w_f<const IMM3: u32>(a: v8f32) -> v8f32 { + static_assert_uimm_bits!(IMM3, 3); + __lasx_xvpickve_w_f(a, IMM3) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepli_b<const IMM_S10: i32>() -> v32i8 { + static_assert_simm_bits!(IMM_S10, 10); + __lasx_xvrepli_b(IMM_S10) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepli_d<const IMM_S10: i32>() -> v4i64 { + static_assert_simm_bits!(IMM_S10, 10); + __lasx_xvrepli_d(IMM_S10) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepli_h<const IMM_S10: i32>() -> v16i16 { + static_assert_simm_bits!(IMM_S10, 10); + __lasx_xvrepli_h(IMM_S10) +} + +#[inline] +#[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn lasx_xvrepli_w<const IMM_S10: i32>() -> v8i32 { + static_assert_simm_bits!(IMM_S10, 10); 
+ __lasx_xvrepli_w(IMM_S10) +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs new file mode 100644 index 000000000000..91e422e7a56d --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs @@ -0,0 +1,13 @@ +//! LoongArch64 LASX intrinsics + +#![allow(non_camel_case_types)] + +#[rustfmt::skip] +mod types; + +#[rustfmt::skip] +mod generated; + +#[rustfmt::skip] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub use self::generated::*; diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs new file mode 100644 index 000000000000..c584d61f5246 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs @@ -0,0 +1,57 @@ +types! { + /// LOONGARCH-specific 256-bit wide vector of 32 packed `i8`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v32i8( + pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, + pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, + pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, + pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, + ); + + /// LOONGARCH-specific 256-bit wide vector of 16 packed `i16`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v16i16( + pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, + pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, + ); + + /// LOONGARCH-specific 256-bit wide vector of 8 packed `i32`. 
+ #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v8i32(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32); + + /// LOONGARCH-specific 256-bit wide vector of 4 packed `i64`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v4i64(pub(crate) i64, pub(crate) i64, pub(crate) i64, pub(crate) i64); + + /// LOONGARCH-specific 256-bit wide vector of 32 packed `u8`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v32u8( + pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, + pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, + pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, + pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, + ); + + /// LOONGARCH-specific 256-bit wide vector of 16 packed `u16`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v16u16( + pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, + pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, + ); + + /// LOONGARCH-specific 256-bit wide vector of 8 packed `u32`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v8u32(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32); + + /// LOONGARCH-specific 256-bit wide vector of 4 packed `u64`. 
+ #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v4u64(pub(crate) u64, pub(crate) u64, pub(crate) u64, pub(crate) u64); + + /// LOONGARCH-specific 256-bit wide vector of 8 packed `f32`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v8f32(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32); + + /// LOONGARCH-specific 256-bit wide vector of 4 packed `f64`. + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub struct v4f64(pub(crate) f64, pub(crate) f64, pub(crate) f64, pub(crate) f64); +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs index 9c27c2e7919e..56f54958b7ac 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs @@ -1,6 +1,9 @@ //! `LoongArch` intrinsics +mod lasx; mod lsx; +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub use self::lasx::*; #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub use self::lsx::*; diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec b/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec new file mode 100644 index 000000000000..63c52291918c --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec @@ -0,0 +1,3685 @@ +// This code is automatically generated. DO NOT MODIFY. 
+// ``` +// OUT_DIR=`pwd`/crates/stdarch-gen-loongarch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasxintrin.h +// ``` + +/// lasx_xvsll_b +name = lasx_xvsll_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsll_h +name = lasx_xvsll_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsll_w +name = lasx_xvsll_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsll_d +name = lasx_xvsll_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvslli_b +name = lasx_xvslli_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvslli_h +name = lasx_xvslli_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvslli_w +name = lasx_xvslli_w +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvslli_d +name = lasx_xvslli_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvsra_b +name = lasx_xvsra_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsra_h +name = lasx_xvsra_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsra_w +name = lasx_xvsra_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsra_d +name = lasx_xvsra_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsrai_b +name = lasx_xvsrai_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvsrai_h +name = lasx_xvsrai_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvsrai_w +name = lasx_xvsrai_w +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvsrai_d +name = lasx_xvsrai_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvsrar_b +name = lasx_xvsrar_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsrar_h +name = lasx_xvsrar_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsrar_w +name = lasx_xvsrar_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + 
+/// lasx_xvsrar_d +name = lasx_xvsrar_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsrari_b +name = lasx_xvsrari_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvsrari_h +name = lasx_xvsrari_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvsrari_w +name = lasx_xvsrari_w +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvsrari_d +name = lasx_xvsrari_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvsrl_b +name = lasx_xvsrl_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsrl_h +name = lasx_xvsrl_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsrl_w +name = lasx_xvsrl_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsrl_d +name = lasx_xvsrl_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsrli_b +name = lasx_xvsrli_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvsrli_h +name = lasx_xvsrli_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvsrli_w +name = lasx_xvsrli_w +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvsrli_d +name = lasx_xvsrli_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvsrlr_b +name = lasx_xvsrlr_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsrlr_h +name = lasx_xvsrlr_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsrlr_w +name = lasx_xvsrlr_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsrlr_d +name = lasx_xvsrlr_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsrlri_b +name = lasx_xvsrlri_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvsrlri_h +name = lasx_xvsrlri_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvsrlri_w +name = lasx_xvsrlri_w +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvsrlri_d +name = 
lasx_xvsrlri_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvbitclr_b +name = lasx_xvbitclr_b +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvbitclr_h +name = lasx_xvbitclr_h +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvbitclr_w +name = lasx_xvbitclr_w +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvbitclr_d +name = lasx_xvbitclr_d +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvbitclri_b +name = lasx_xvbitclri_b +asm-fmts = xd, xj, ui3 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvbitclri_h +name = lasx_xvbitclri_h +asm-fmts = xd, xj, ui4 +data-types = UV16HI, UV16HI, UQI + +/// lasx_xvbitclri_w +name = lasx_xvbitclri_w +asm-fmts = xd, xj, ui5 +data-types = UV8SI, UV8SI, UQI + +/// lasx_xvbitclri_d +name = lasx_xvbitclri_d +asm-fmts = xd, xj, ui6 +data-types = UV4DI, UV4DI, UQI + +/// lasx_xvbitset_b +name = lasx_xvbitset_b +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvbitset_h +name = lasx_xvbitset_h +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvbitset_w +name = lasx_xvbitset_w +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvbitset_d +name = lasx_xvbitset_d +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvbitseti_b +name = lasx_xvbitseti_b +asm-fmts = xd, xj, ui3 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvbitseti_h +name = lasx_xvbitseti_h +asm-fmts = xd, xj, ui4 +data-types = UV16HI, UV16HI, UQI + +/// lasx_xvbitseti_w +name = lasx_xvbitseti_w +asm-fmts = xd, xj, ui5 +data-types = UV8SI, UV8SI, UQI + +/// lasx_xvbitseti_d +name = lasx_xvbitseti_d +asm-fmts = xd, xj, ui6 +data-types = UV4DI, UV4DI, UQI + +/// lasx_xvbitrev_b +name = lasx_xvbitrev_b +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvbitrev_h +name = lasx_xvbitrev_h +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvbitrev_w 
+name = lasx_xvbitrev_w +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvbitrev_d +name = lasx_xvbitrev_d +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvbitrevi_b +name = lasx_xvbitrevi_b +asm-fmts = xd, xj, ui3 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvbitrevi_h +name = lasx_xvbitrevi_h +asm-fmts = xd, xj, ui4 +data-types = UV16HI, UV16HI, UQI + +/// lasx_xvbitrevi_w +name = lasx_xvbitrevi_w +asm-fmts = xd, xj, ui5 +data-types = UV8SI, UV8SI, UQI + +/// lasx_xvbitrevi_d +name = lasx_xvbitrevi_d +asm-fmts = xd, xj, ui6 +data-types = UV4DI, UV4DI, UQI + +/// lasx_xvadd_b +name = lasx_xvadd_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvadd_h +name = lasx_xvadd_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvadd_w +name = lasx_xvadd_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvadd_d +name = lasx_xvadd_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvaddi_bu +name = lasx_xvaddi_bu +asm-fmts = xd, xj, ui5 +data-types = V32QI, V32QI, UQI + +/// lasx_xvaddi_hu +name = lasx_xvaddi_hu +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, UQI + +/// lasx_xvaddi_wu +name = lasx_xvaddi_wu +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvaddi_du +name = lasx_xvaddi_du +asm-fmts = xd, xj, ui5 +data-types = V4DI, V4DI, UQI + +/// lasx_xvsub_b +name = lasx_xvsub_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsub_h +name = lasx_xvsub_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsub_w +name = lasx_xvsub_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsub_d +name = lasx_xvsub_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsubi_bu +name = lasx_xvsubi_bu +asm-fmts = xd, xj, ui5 +data-types = V32QI, V32QI, UQI + +/// lasx_xvsubi_hu +name = lasx_xvsubi_hu +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, UQI + +/// 
lasx_xvsubi_wu +name = lasx_xvsubi_wu +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvsubi_du +name = lasx_xvsubi_du +asm-fmts = xd, xj, ui5 +data-types = V4DI, V4DI, UQI + +/// lasx_xvmax_b +name = lasx_xvmax_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvmax_h +name = lasx_xvmax_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvmax_w +name = lasx_xvmax_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvmax_d +name = lasx_xvmax_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmaxi_b +name = lasx_xvmaxi_b +asm-fmts = xd, xj, si5 +data-types = V32QI, V32QI, QI + +/// lasx_xvmaxi_h +name = lasx_xvmaxi_h +asm-fmts = xd, xj, si5 +data-types = V16HI, V16HI, QI + +/// lasx_xvmaxi_w +name = lasx_xvmaxi_w +asm-fmts = xd, xj, si5 +data-types = V8SI, V8SI, QI + +/// lasx_xvmaxi_d +name = lasx_xvmaxi_d +asm-fmts = xd, xj, si5 +data-types = V4DI, V4DI, QI + +/// lasx_xvmax_bu +name = lasx_xvmax_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvmax_hu +name = lasx_xvmax_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvmax_wu +name = lasx_xvmax_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvmax_du +name = lasx_xvmax_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvmaxi_bu +name = lasx_xvmaxi_bu +asm-fmts = xd, xj, ui5 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvmaxi_hu +name = lasx_xvmaxi_hu +asm-fmts = xd, xj, ui5 +data-types = UV16HI, UV16HI, UQI + +/// lasx_xvmaxi_wu +name = lasx_xvmaxi_wu +asm-fmts = xd, xj, ui5 +data-types = UV8SI, UV8SI, UQI + +/// lasx_xvmaxi_du +name = lasx_xvmaxi_du +asm-fmts = xd, xj, ui5 +data-types = UV4DI, UV4DI, UQI + +/// lasx_xvmin_b +name = lasx_xvmin_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvmin_h +name = lasx_xvmin_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvmin_w 
+name = lasx_xvmin_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvmin_d +name = lasx_xvmin_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmini_b +name = lasx_xvmini_b +asm-fmts = xd, xj, si5 +data-types = V32QI, V32QI, QI + +/// lasx_xvmini_h +name = lasx_xvmini_h +asm-fmts = xd, xj, si5 +data-types = V16HI, V16HI, QI + +/// lasx_xvmini_w +name = lasx_xvmini_w +asm-fmts = xd, xj, si5 +data-types = V8SI, V8SI, QI + +/// lasx_xvmini_d +name = lasx_xvmini_d +asm-fmts = xd, xj, si5 +data-types = V4DI, V4DI, QI + +/// lasx_xvmin_bu +name = lasx_xvmin_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvmin_hu +name = lasx_xvmin_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvmin_wu +name = lasx_xvmin_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvmin_du +name = lasx_xvmin_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvmini_bu +name = lasx_xvmini_bu +asm-fmts = xd, xj, ui5 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvmini_hu +name = lasx_xvmini_hu +asm-fmts = xd, xj, ui5 +data-types = UV16HI, UV16HI, UQI + +/// lasx_xvmini_wu +name = lasx_xvmini_wu +asm-fmts = xd, xj, ui5 +data-types = UV8SI, UV8SI, UQI + +/// lasx_xvmini_du +name = lasx_xvmini_du +asm-fmts = xd, xj, ui5 +data-types = UV4DI, UV4DI, UQI + +/// lasx_xvseq_b +name = lasx_xvseq_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvseq_h +name = lasx_xvseq_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvseq_w +name = lasx_xvseq_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvseq_d +name = lasx_xvseq_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvseqi_b +name = lasx_xvseqi_b +asm-fmts = xd, xj, si5 +data-types = V32QI, V32QI, QI + +/// lasx_xvseqi_h +name = lasx_xvseqi_h +asm-fmts = xd, xj, si5 +data-types = V16HI, V16HI, QI + +/// lasx_xvseqi_w +name = lasx_xvseqi_w 
+asm-fmts = xd, xj, si5 +data-types = V8SI, V8SI, QI + +/// lasx_xvseqi_d +name = lasx_xvseqi_d +asm-fmts = xd, xj, si5 +data-types = V4DI, V4DI, QI + +/// lasx_xvslt_b +name = lasx_xvslt_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvslt_h +name = lasx_xvslt_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvslt_w +name = lasx_xvslt_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvslt_d +name = lasx_xvslt_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvslti_b +name = lasx_xvslti_b +asm-fmts = xd, xj, si5 +data-types = V32QI, V32QI, QI + +/// lasx_xvslti_h +name = lasx_xvslti_h +asm-fmts = xd, xj, si5 +data-types = V16HI, V16HI, QI + +/// lasx_xvslti_w +name = lasx_xvslti_w +asm-fmts = xd, xj, si5 +data-types = V8SI, V8SI, QI + +/// lasx_xvslti_d +name = lasx_xvslti_d +asm-fmts = xd, xj, si5 +data-types = V4DI, V4DI, QI + +/// lasx_xvslt_bu +name = lasx_xvslt_bu +asm-fmts = xd, xj, xk +data-types = V32QI, UV32QI, UV32QI + +/// lasx_xvslt_hu +name = lasx_xvslt_hu +asm-fmts = xd, xj, xk +data-types = V16HI, UV16HI, UV16HI + +/// lasx_xvslt_wu +name = lasx_xvslt_wu +asm-fmts = xd, xj, xk +data-types = V8SI, UV8SI, UV8SI + +/// lasx_xvslt_du +name = lasx_xvslt_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvslti_bu +name = lasx_xvslti_bu +asm-fmts = xd, xj, ui5 +data-types = V32QI, UV32QI, UQI + +/// lasx_xvslti_hu +name = lasx_xvslti_hu +asm-fmts = xd, xj, ui5 +data-types = V16HI, UV16HI, UQI + +/// lasx_xvslti_wu +name = lasx_xvslti_wu +asm-fmts = xd, xj, ui5 +data-types = V8SI, UV8SI, UQI + +/// lasx_xvslti_du +name = lasx_xvslti_du +asm-fmts = xd, xj, ui5 +data-types = V4DI, UV4DI, UQI + +/// lasx_xvsle_b +name = lasx_xvsle_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsle_h +name = lasx_xvsle_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsle_w +name = lasx_xvsle_w +asm-fmts = xd, xj, xk 
+data-types = V8SI, V8SI, V8SI + +/// lasx_xvsle_d +name = lasx_xvsle_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvslei_b +name = lasx_xvslei_b +asm-fmts = xd, xj, si5 +data-types = V32QI, V32QI, QI + +/// lasx_xvslei_h +name = lasx_xvslei_h +asm-fmts = xd, xj, si5 +data-types = V16HI, V16HI, QI + +/// lasx_xvslei_w +name = lasx_xvslei_w +asm-fmts = xd, xj, si5 +data-types = V8SI, V8SI, QI + +/// lasx_xvslei_d +name = lasx_xvslei_d +asm-fmts = xd, xj, si5 +data-types = V4DI, V4DI, QI + +/// lasx_xvsle_bu +name = lasx_xvsle_bu +asm-fmts = xd, xj, xk +data-types = V32QI, UV32QI, UV32QI + +/// lasx_xvsle_hu +name = lasx_xvsle_hu +asm-fmts = xd, xj, xk +data-types = V16HI, UV16HI, UV16HI + +/// lasx_xvsle_wu +name = lasx_xvsle_wu +asm-fmts = xd, xj, xk +data-types = V8SI, UV8SI, UV8SI + +/// lasx_xvsle_du +name = lasx_xvsle_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvslei_bu +name = lasx_xvslei_bu +asm-fmts = xd, xj, ui5 +data-types = V32QI, UV32QI, UQI + +/// lasx_xvslei_hu +name = lasx_xvslei_hu +asm-fmts = xd, xj, ui5 +data-types = V16HI, UV16HI, UQI + +/// lasx_xvslei_wu +name = lasx_xvslei_wu +asm-fmts = xd, xj, ui5 +data-types = V8SI, UV8SI, UQI + +/// lasx_xvslei_du +name = lasx_xvslei_du +asm-fmts = xd, xj, ui5 +data-types = V4DI, UV4DI, UQI + +/// lasx_xvsat_b +name = lasx_xvsat_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvsat_h +name = lasx_xvsat_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvsat_w +name = lasx_xvsat_w +asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvsat_d +name = lasx_xvsat_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvsat_bu +name = lasx_xvsat_bu +asm-fmts = xd, xj, ui3 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvsat_hu +name = lasx_xvsat_hu +asm-fmts = xd, xj, ui4 +data-types = UV16HI, UV16HI, UQI + +/// lasx_xvsat_wu +name = lasx_xvsat_wu +asm-fmts = xd, xj, ui5 +data-types = UV8SI, 
UV8SI, UQI + +/// lasx_xvsat_du +name = lasx_xvsat_du +asm-fmts = xd, xj, ui6 +data-types = UV4DI, UV4DI, UQI + +/// lasx_xvadda_b +name = lasx_xvadda_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvadda_h +name = lasx_xvadda_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvadda_w +name = lasx_xvadda_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvadda_d +name = lasx_xvadda_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsadd_b +name = lasx_xvsadd_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsadd_h +name = lasx_xvsadd_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsadd_w +name = lasx_xvsadd_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsadd_d +name = lasx_xvsadd_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsadd_bu +name = lasx_xvsadd_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvsadd_hu +name = lasx_xvsadd_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvsadd_wu +name = lasx_xvsadd_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvsadd_du +name = lasx_xvsadd_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvavg_b +name = lasx_xvavg_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvavg_h +name = lasx_xvavg_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvavg_w +name = lasx_xvavg_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvavg_d +name = lasx_xvavg_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvavg_bu +name = lasx_xvavg_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvavg_hu +name = lasx_xvavg_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvavg_wu +name = lasx_xvavg_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, 
UV8SI + +/// lasx_xvavg_du +name = lasx_xvavg_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvavgr_b +name = lasx_xvavgr_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvavgr_h +name = lasx_xvavgr_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvavgr_w +name = lasx_xvavgr_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvavgr_d +name = lasx_xvavgr_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvavgr_bu +name = lasx_xvavgr_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvavgr_hu +name = lasx_xvavgr_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvavgr_wu +name = lasx_xvavgr_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvavgr_du +name = lasx_xvavgr_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvssub_b +name = lasx_xvssub_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvssub_h +name = lasx_xvssub_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvssub_w +name = lasx_xvssub_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvssub_d +name = lasx_xvssub_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvssub_bu +name = lasx_xvssub_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvssub_hu +name = lasx_xvssub_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvssub_wu +name = lasx_xvssub_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvssub_du +name = lasx_xvssub_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvabsd_b +name = lasx_xvabsd_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvabsd_h +name = lasx_xvabsd_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvabsd_w +name = lasx_xvabsd_w +asm-fmts = xd, xj, xk +data-types 
= V8SI, V8SI, V8SI + +/// lasx_xvabsd_d +name = lasx_xvabsd_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvabsd_bu +name = lasx_xvabsd_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvabsd_hu +name = lasx_xvabsd_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvabsd_wu +name = lasx_xvabsd_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvabsd_du +name = lasx_xvabsd_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvmul_b +name = lasx_xvmul_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvmul_h +name = lasx_xvmul_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvmul_w +name = lasx_xvmul_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvmul_d +name = lasx_xvmul_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmadd_b +name = lasx_xvmadd_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI, V32QI + +/// lasx_xvmadd_h +name = lasx_xvmadd_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI, V16HI + +/// lasx_xvmadd_w +name = lasx_xvmadd_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI, V8SI + +/// lasx_xvmadd_d +name = lasx_xvmadd_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI, V4DI + +/// lasx_xvmsub_b +name = lasx_xvmsub_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI, V32QI + +/// lasx_xvmsub_h +name = lasx_xvmsub_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI, V16HI + +/// lasx_xvmsub_w +name = lasx_xvmsub_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI, V8SI + +/// lasx_xvmsub_d +name = lasx_xvmsub_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI, V4DI + +/// lasx_xvdiv_b +name = lasx_xvdiv_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvdiv_h +name = lasx_xvdiv_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvdiv_w +name = lasx_xvdiv_w 
+asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvdiv_d +name = lasx_xvdiv_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvdiv_bu +name = lasx_xvdiv_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvdiv_hu +name = lasx_xvdiv_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvdiv_wu +name = lasx_xvdiv_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvdiv_du +name = lasx_xvdiv_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvhaddw_h_b +name = lasx_xvhaddw_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvhaddw_w_h +name = lasx_xvhaddw_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvhaddw_d_w +name = lasx_xvhaddw_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvhaddw_hu_bu +name = lasx_xvhaddw_hu_bu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV32QI, UV32QI + +/// lasx_xvhaddw_wu_hu +name = lasx_xvhaddw_wu_hu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV16HI, UV16HI + +/// lasx_xvhaddw_du_wu +name = lasx_xvhaddw_du_wu +asm-fmts = xd, xj, xk +data-types = UV4DI, UV8SI, UV8SI + +/// lasx_xvhsubw_h_b +name = lasx_xvhsubw_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvhsubw_w_h +name = lasx_xvhsubw_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvhsubw_d_w +name = lasx_xvhsubw_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvhsubw_hu_bu +name = lasx_xvhsubw_hu_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvhsubw_wu_hu +name = lasx_xvhsubw_wu_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvhsubw_du_wu +name = lasx_xvhsubw_du_wu +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, UV8SI + +/// lasx_xvmod_b +name = lasx_xvmod_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvmod_h +name = lasx_xvmod_h +asm-fmts 
= xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvmod_w +name = lasx_xvmod_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvmod_d +name = lasx_xvmod_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmod_bu +name = lasx_xvmod_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvmod_hu +name = lasx_xvmod_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvmod_wu +name = lasx_xvmod_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvmod_du +name = lasx_xvmod_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvrepl128vei_b +name = lasx_xvrepl128vei_b +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, UQI + +/// lasx_xvrepl128vei_h +name = lasx_xvrepl128vei_h +asm-fmts = xd, xj, ui3 +data-types = V16HI, V16HI, UQI + +/// lasx_xvrepl128vei_w +name = lasx_xvrepl128vei_w +asm-fmts = xd, xj, ui2 +data-types = V8SI, V8SI, UQI + +/// lasx_xvrepl128vei_d +name = lasx_xvrepl128vei_d +asm-fmts = xd, xj, ui1 +data-types = V4DI, V4DI, UQI + +/// lasx_xvpickev_b +name = lasx_xvpickev_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvpickev_h +name = lasx_xvpickev_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvpickev_w +name = lasx_xvpickev_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvpickev_d +name = lasx_xvpickev_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvpickod_b +name = lasx_xvpickod_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvpickod_h +name = lasx_xvpickod_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvpickod_w +name = lasx_xvpickod_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvpickod_d +name = lasx_xvpickod_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvilvh_b +name = lasx_xvilvh_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, 
V32QI + +/// lasx_xvilvh_h +name = lasx_xvilvh_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvilvh_w +name = lasx_xvilvh_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvilvh_d +name = lasx_xvilvh_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvilvl_b +name = lasx_xvilvl_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvilvl_h +name = lasx_xvilvl_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvilvl_w +name = lasx_xvilvl_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvilvl_d +name = lasx_xvilvl_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvpackev_b +name = lasx_xvpackev_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvpackev_h +name = lasx_xvpackev_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvpackev_w +name = lasx_xvpackev_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvpackev_d +name = lasx_xvpackev_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvpackod_b +name = lasx_xvpackod_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvpackod_h +name = lasx_xvpackod_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvpackod_w +name = lasx_xvpackod_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvpackod_d +name = lasx_xvpackod_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvshuf_b +name = lasx_xvshuf_b +asm-fmts = xd, xj, xk, xa +data-types = V32QI, V32QI, V32QI, V32QI + +/// lasx_xvshuf_h +name = lasx_xvshuf_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI, V16HI + +/// lasx_xvshuf_w +name = lasx_xvshuf_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI, V8SI + +/// lasx_xvshuf_d +name = lasx_xvshuf_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI, V4DI + +/// lasx_xvand_v +name = lasx_xvand_v +asm-fmts = xd, xj, 
xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvandi_b +name = lasx_xvandi_b +asm-fmts = xd, xj, ui8 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvor_v +name = lasx_xvor_v +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvori_b +name = lasx_xvori_b +asm-fmts = xd, xj, ui8 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvnor_v +name = lasx_xvnor_v +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvnori_b +name = lasx_xvnori_b +asm-fmts = xd, xj, ui8 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvxor_v +name = lasx_xvxor_v +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvxori_b +name = lasx_xvxori_b +asm-fmts = xd, xj, ui8 +data-types = UV32QI, UV32QI, UQI + +/// lasx_xvbitsel_v +name = lasx_xvbitsel_v +asm-fmts = xd, xj, xk, xa +data-types = UV32QI, UV32QI, UV32QI, UV32QI + +/// lasx_xvbitseli_b +name = lasx_xvbitseli_b +asm-fmts = xd, xj, ui8 +data-types = UV32QI, UV32QI, UV32QI, USI + +/// lasx_xvshuf4i_b +name = lasx_xvshuf4i_b +asm-fmts = xd, xj, ui8 +data-types = V32QI, V32QI, USI + +/// lasx_xvshuf4i_h +name = lasx_xvshuf4i_h +asm-fmts = xd, xj, ui8 +data-types = V16HI, V16HI, USI + +/// lasx_xvshuf4i_w +name = lasx_xvshuf4i_w +asm-fmts = xd, xj, ui8 +data-types = V8SI, V8SI, USI + +/// lasx_xvreplgr2vr_b +name = lasx_xvreplgr2vr_b +asm-fmts = xd, rj +data-types = V32QI, SI + +/// lasx_xvreplgr2vr_h +name = lasx_xvreplgr2vr_h +asm-fmts = xd, rj +data-types = V16HI, SI + +/// lasx_xvreplgr2vr_w +name = lasx_xvreplgr2vr_w +asm-fmts = xd, rj +data-types = V8SI, SI + +/// lasx_xvreplgr2vr_d +name = lasx_xvreplgr2vr_d +asm-fmts = xd, rj +data-types = V4DI, DI + +/// lasx_xvpcnt_b +name = lasx_xvpcnt_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvpcnt_h +name = lasx_xvpcnt_h +asm-fmts = xd, xj +data-types = V16HI, V16HI + +/// lasx_xvpcnt_w +name = lasx_xvpcnt_w +asm-fmts = xd, xj +data-types = V8SI, V8SI + +/// lasx_xvpcnt_d +name = lasx_xvpcnt_d +asm-fmts = xd, xj 
+data-types = V4DI, V4DI + +/// lasx_xvclo_b +name = lasx_xvclo_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvclo_h +name = lasx_xvclo_h +asm-fmts = xd, xj +data-types = V16HI, V16HI + +/// lasx_xvclo_w +name = lasx_xvclo_w +asm-fmts = xd, xj +data-types = V8SI, V8SI + +/// lasx_xvclo_d +name = lasx_xvclo_d +asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvclz_b +name = lasx_xvclz_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvclz_h +name = lasx_xvclz_h +asm-fmts = xd, xj +data-types = V16HI, V16HI + +/// lasx_xvclz_w +name = lasx_xvclz_w +asm-fmts = xd, xj +data-types = V8SI, V8SI + +/// lasx_xvclz_d +name = lasx_xvclz_d +asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvfadd_s +name = lasx_xvfadd_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfadd_d +name = lasx_xvfadd_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfsub_s +name = lasx_xvfsub_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfsub_d +name = lasx_xvfsub_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfmul_s +name = lasx_xvfmul_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfmul_d +name = lasx_xvfmul_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfdiv_s +name = lasx_xvfdiv_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfdiv_d +name = lasx_xvfdiv_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfcvt_h_s +name = lasx_xvfcvt_h_s +asm-fmts = xd, xj, xk +data-types = V16HI, V8SF, V8SF + +/// lasx_xvfcvt_s_d +name = lasx_xvfcvt_s_d +asm-fmts = xd, xj, xk +data-types = V8SF, V4DF, V4DF + +/// lasx_xvfmin_s +name = lasx_xvfmin_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfmin_d +name = lasx_xvfmin_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfmina_s +name = lasx_xvfmina_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// 
lasx_xvfmina_d +name = lasx_xvfmina_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfmax_s +name = lasx_xvfmax_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfmax_d +name = lasx_xvfmax_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfmaxa_s +name = lasx_xvfmaxa_s +asm-fmts = xd, xj, xk +data-types = V8SF, V8SF, V8SF + +/// lasx_xvfmaxa_d +name = lasx_xvfmaxa_d +asm-fmts = xd, xj, xk +data-types = V4DF, V4DF, V4DF + +/// lasx_xvfclass_s +name = lasx_xvfclass_s +asm-fmts = xd, xj +data-types = V8SI, V8SF + +/// lasx_xvfclass_d +name = lasx_xvfclass_d +asm-fmts = xd, xj +data-types = V4DI, V4DF + +/// lasx_xvfsqrt_s +name = lasx_xvfsqrt_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfsqrt_d +name = lasx_xvfsqrt_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfrecip_s +name = lasx_xvfrecip_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrecip_d +name = lasx_xvfrecip_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfrint_s +name = lasx_xvfrint_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrint_d +name = lasx_xvfrint_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfrsqrt_s +name = lasx_xvfrsqrt_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrsqrt_d +name = lasx_xvfrsqrt_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvflogb_s +name = lasx_xvflogb_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvflogb_d +name = lasx_xvflogb_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfcvth_s_h +name = lasx_xvfcvth_s_h +asm-fmts = xd, xj +data-types = V8SF, V16HI + +/// lasx_xvfcvth_d_s +name = lasx_xvfcvth_d_s +asm-fmts = xd, xj +data-types = V4DF, V8SF + +/// lasx_xvfcvtl_s_h +name = lasx_xvfcvtl_s_h +asm-fmts = xd, xj +data-types = V8SF, V16HI + +/// lasx_xvfcvtl_d_s +name = lasx_xvfcvtl_d_s +asm-fmts = xd, xj +data-types = V4DF, V8SF + +/// lasx_xvftint_w_s +name = lasx_xvftint_w_s +asm-fmts = 
xd, xj +data-types = V8SI, V8SF + +/// lasx_xvftint_l_d +name = lasx_xvftint_l_d +asm-fmts = xd, xj +data-types = V4DI, V4DF + +/// lasx_xvftint_wu_s +name = lasx_xvftint_wu_s +asm-fmts = xd, xj +data-types = UV8SI, V8SF + +/// lasx_xvftint_lu_d +name = lasx_xvftint_lu_d +asm-fmts = xd, xj +data-types = UV4DI, V4DF + +/// lasx_xvftintrz_w_s +name = lasx_xvftintrz_w_s +asm-fmts = xd, xj +data-types = V8SI, V8SF + +/// lasx_xvftintrz_l_d +name = lasx_xvftintrz_l_d +asm-fmts = xd, xj +data-types = V4DI, V4DF + +/// lasx_xvftintrz_wu_s +name = lasx_xvftintrz_wu_s +asm-fmts = xd, xj +data-types = UV8SI, V8SF + +/// lasx_xvftintrz_lu_d +name = lasx_xvftintrz_lu_d +asm-fmts = xd, xj +data-types = UV4DI, V4DF + +/// lasx_xvffint_s_w +name = lasx_xvffint_s_w +asm-fmts = xd, xj +data-types = V8SF, V8SI + +/// lasx_xvffint_d_l +name = lasx_xvffint_d_l +asm-fmts = xd, xj +data-types = V4DF, V4DI + +/// lasx_xvffint_s_wu +name = lasx_xvffint_s_wu +asm-fmts = xd, xj +data-types = V8SF, UV8SI + +/// lasx_xvffint_d_lu +name = lasx_xvffint_d_lu +asm-fmts = xd, xj +data-types = V4DF, UV4DI + +/// lasx_xvreplve_b +name = lasx_xvreplve_b +asm-fmts = xd, xj, rk +data-types = V32QI, V32QI, SI + +/// lasx_xvreplve_h +name = lasx_xvreplve_h +asm-fmts = xd, xj, rk +data-types = V16HI, V16HI, SI + +/// lasx_xvreplve_w +name = lasx_xvreplve_w +asm-fmts = xd, xj, rk +data-types = V8SI, V8SI, SI + +/// lasx_xvreplve_d +name = lasx_xvreplve_d +asm-fmts = xd, xj, rk +data-types = V4DI, V4DI, SI + +/// lasx_xvpermi_w +name = lasx_xvpermi_w +asm-fmts = xd, xj, ui8 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvandn_v +name = lasx_xvandn_v +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvneg_b +name = lasx_xvneg_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvneg_h +name = lasx_xvneg_h +asm-fmts = xd, xj +data-types = V16HI, V16HI + +/// lasx_xvneg_w +name = lasx_xvneg_w +asm-fmts = xd, xj +data-types = V8SI, V8SI + +/// lasx_xvneg_d +name = lasx_xvneg_d 
+asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvmuh_b +name = lasx_xvmuh_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvmuh_h +name = lasx_xvmuh_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvmuh_w +name = lasx_xvmuh_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvmuh_d +name = lasx_xvmuh_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmuh_bu +name = lasx_xvmuh_bu +asm-fmts = xd, xj, xk +data-types = UV32QI, UV32QI, UV32QI + +/// lasx_xvmuh_hu +name = lasx_xvmuh_hu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV16HI + +/// lasx_xvmuh_wu +name = lasx_xvmuh_wu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV8SI + +/// lasx_xvmuh_du +name = lasx_xvmuh_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvsllwil_h_b +name = lasx_xvsllwil_h_b +asm-fmts = xd, xj, ui3 +data-types = V16HI, V32QI, UQI + +/// lasx_xvsllwil_w_h +name = lasx_xvsllwil_w_h +asm-fmts = xd, xj, ui4 +data-types = V8SI, V16HI, UQI + +/// lasx_xvsllwil_d_w +name = lasx_xvsllwil_d_w +asm-fmts = xd, xj, ui5 +data-types = V4DI, V8SI, UQI + +/// lasx_xvsllwil_hu_bu +name = lasx_xvsllwil_hu_bu +asm-fmts = xd, xj, ui3 +data-types = UV16HI, UV32QI, UQI + +/// lasx_xvsllwil_wu_hu +name = lasx_xvsllwil_wu_hu +asm-fmts = xd, xj, ui4 +data-types = UV8SI, UV16HI, UQI + +/// lasx_xvsllwil_du_wu +name = lasx_xvsllwil_du_wu +asm-fmts = xd, xj, ui5 +data-types = UV4DI, UV8SI, UQI + +/// lasx_xvsran_b_h +name = lasx_xvsran_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvsran_h_w +name = lasx_xvsran_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, V8SI + +/// lasx_xvsran_w_d +name = lasx_xvsran_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssran_b_h +name = lasx_xvssran_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvssran_h_w +name = lasx_xvssran_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, 
V8SI, V8SI + +/// lasx_xvssran_w_d +name = lasx_xvssran_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssran_bu_h +name = lasx_xvssran_bu_h +asm-fmts = xd, xj, xk +data-types = UV32QI, UV16HI, UV16HI + +/// lasx_xvssran_hu_w +name = lasx_xvssran_hu_w +asm-fmts = xd, xj, xk +data-types = UV16HI, UV8SI, UV8SI + +/// lasx_xvssran_wu_d +name = lasx_xvssran_wu_d +asm-fmts = xd, xj, xk +data-types = UV8SI, UV4DI, UV4DI + +/// lasx_xvsrarn_b_h +name = lasx_xvsrarn_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvsrarn_h_w +name = lasx_xvsrarn_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, V8SI + +/// lasx_xvsrarn_w_d +name = lasx_xvsrarn_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssrarn_b_h +name = lasx_xvssrarn_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvssrarn_h_w +name = lasx_xvssrarn_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, V8SI + +/// lasx_xvssrarn_w_d +name = lasx_xvssrarn_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssrarn_bu_h +name = lasx_xvssrarn_bu_h +asm-fmts = xd, xj, xk +data-types = UV32QI, UV16HI, UV16HI + +/// lasx_xvssrarn_hu_w +name = lasx_xvssrarn_hu_w +asm-fmts = xd, xj, xk +data-types = UV16HI, UV8SI, UV8SI + +/// lasx_xvssrarn_wu_d +name = lasx_xvssrarn_wu_d +asm-fmts = xd, xj, xk +data-types = UV8SI, UV4DI, UV4DI + +/// lasx_xvsrln_b_h +name = lasx_xvsrln_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvsrln_h_w +name = lasx_xvsrln_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, V8SI + +/// lasx_xvsrln_w_d +name = lasx_xvsrln_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssrln_bu_h +name = lasx_xvssrln_bu_h +asm-fmts = xd, xj, xk +data-types = UV32QI, UV16HI, UV16HI + +/// lasx_xvssrln_hu_w +name = lasx_xvssrln_hu_w +asm-fmts = xd, xj, xk +data-types = UV16HI, UV8SI, UV8SI + +/// lasx_xvssrln_wu_d +name = lasx_xvssrln_wu_d +asm-fmts = 
xd, xj, xk +data-types = UV8SI, UV4DI, UV4DI + +/// lasx_xvsrlrn_b_h +name = lasx_xvsrlrn_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvsrlrn_h_w +name = lasx_xvsrlrn_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, V8SI + +/// lasx_xvsrlrn_w_d +name = lasx_xvsrlrn_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssrlrn_bu_h +name = lasx_xvssrlrn_bu_h +asm-fmts = xd, xj, xk +data-types = UV32QI, UV16HI, UV16HI + +/// lasx_xvssrlrn_hu_w +name = lasx_xvssrlrn_hu_w +asm-fmts = xd, xj, xk +data-types = UV16HI, UV8SI, UV8SI + +/// lasx_xvssrlrn_wu_d +name = lasx_xvssrlrn_wu_d +asm-fmts = xd, xj, xk +data-types = UV8SI, UV4DI, UV4DI + +/// lasx_xvfrstpi_b +name = lasx_xvfrstpi_b +asm-fmts = xd, xj, ui5 +data-types = V32QI, V32QI, V32QI, UQI + +/// lasx_xvfrstpi_h +name = lasx_xvfrstpi_h +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, UQI + +/// lasx_xvfrstp_b +name = lasx_xvfrstp_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI, V32QI + +/// lasx_xvfrstp_h +name = lasx_xvfrstp_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI, V16HI + +/// lasx_xvshuf4i_d +name = lasx_xvshuf4i_d +asm-fmts = xd, xj, ui8 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvbsrl_v +name = lasx_xvbsrl_v +asm-fmts = xd, xj, ui5 +data-types = V32QI, V32QI, UQI + +/// lasx_xvbsll_v +name = lasx_xvbsll_v +asm-fmts = xd, xj, ui5 +data-types = V32QI, V32QI, UQI + +/// lasx_xvextrins_b +name = lasx_xvextrins_b +asm-fmts = xd, xj, ui8 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvextrins_h +name = lasx_xvextrins_h +asm-fmts = xd, xj, ui8 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvextrins_w +name = lasx_xvextrins_w +asm-fmts = xd, xj, ui8 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvextrins_d +name = lasx_xvextrins_d +asm-fmts = xd, xj, ui8 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvmskltz_b +name = lasx_xvmskltz_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvmskltz_h +name 
= lasx_xvmskltz_h +asm-fmts = xd, xj +data-types = V16HI, V16HI + +/// lasx_xvmskltz_w +name = lasx_xvmskltz_w +asm-fmts = xd, xj +data-types = V8SI, V8SI + +/// lasx_xvmskltz_d +name = lasx_xvmskltz_d +asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvsigncov_b +name = lasx_xvsigncov_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvsigncov_h +name = lasx_xvsigncov_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvsigncov_w +name = lasx_xvsigncov_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvsigncov_d +name = lasx_xvsigncov_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvfmadd_s +name = lasx_xvfmadd_s +asm-fmts = xd, xj, xk, xa +data-types = V8SF, V8SF, V8SF, V8SF + +/// lasx_xvfmadd_d +name = lasx_xvfmadd_d +asm-fmts = xd, xj, xk, xa +data-types = V4DF, V4DF, V4DF, V4DF + +/// lasx_xvfmsub_s +name = lasx_xvfmsub_s +asm-fmts = xd, xj, xk, xa +data-types = V8SF, V8SF, V8SF, V8SF + +/// lasx_xvfmsub_d +name = lasx_xvfmsub_d +asm-fmts = xd, xj, xk, xa +data-types = V4DF, V4DF, V4DF, V4DF + +/// lasx_xvfnmadd_s +name = lasx_xvfnmadd_s +asm-fmts = xd, xj, xk, xa +data-types = V8SF, V8SF, V8SF, V8SF + +/// lasx_xvfnmadd_d +name = lasx_xvfnmadd_d +asm-fmts = xd, xj, xk, xa +data-types = V4DF, V4DF, V4DF, V4DF + +/// lasx_xvfnmsub_s +name = lasx_xvfnmsub_s +asm-fmts = xd, xj, xk, xa +data-types = V8SF, V8SF, V8SF, V8SF + +/// lasx_xvfnmsub_d +name = lasx_xvfnmsub_d +asm-fmts = xd, xj, xk, xa +data-types = V4DF, V4DF, V4DF, V4DF + +/// lasx_xvftintrne_w_s +name = lasx_xvftintrne_w_s +asm-fmts = xd, xj +data-types = V8SI, V8SF + +/// lasx_xvftintrne_l_d +name = lasx_xvftintrne_l_d +asm-fmts = xd, xj +data-types = V4DI, V4DF + +/// lasx_xvftintrp_w_s +name = lasx_xvftintrp_w_s +asm-fmts = xd, xj +data-types = V8SI, V8SF + +/// lasx_xvftintrp_l_d +name = lasx_xvftintrp_l_d +asm-fmts = xd, xj +data-types = V4DI, V4DF + +/// lasx_xvftintrm_w_s +name = lasx_xvftintrm_w_s +asm-fmts 
= xd, xj +data-types = V8SI, V8SF + +/// lasx_xvftintrm_l_d +name = lasx_xvftintrm_l_d +asm-fmts = xd, xj +data-types = V4DI, V4DF + +/// lasx_xvftint_w_d +name = lasx_xvftint_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DF, V4DF + +/// lasx_xvffint_s_l +name = lasx_xvffint_s_l +asm-fmts = xd, xj, xk +data-types = V8SF, V4DI, V4DI + +/// lasx_xvftintrz_w_d +name = lasx_xvftintrz_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DF, V4DF + +/// lasx_xvftintrp_w_d +name = lasx_xvftintrp_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DF, V4DF + +/// lasx_xvftintrm_w_d +name = lasx_xvftintrm_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DF, V4DF + +/// lasx_xvftintrne_w_d +name = lasx_xvftintrne_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DF, V4DF + +/// lasx_xvftinth_l_s +name = lasx_xvftinth_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintl_l_s +name = lasx_xvftintl_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvffinth_d_w +name = lasx_xvffinth_d_w +asm-fmts = xd, xj +data-types = V4DF, V8SI + +/// lasx_xvffintl_d_w +name = lasx_xvffintl_d_w +asm-fmts = xd, xj +data-types = V4DF, V8SI + +/// lasx_xvftintrzh_l_s +name = lasx_xvftintrzh_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrzl_l_s +name = lasx_xvftintrzl_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrph_l_s +name = lasx_xvftintrph_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrpl_l_s +name = lasx_xvftintrpl_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrmh_l_s +name = lasx_xvftintrmh_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrml_l_s +name = lasx_xvftintrml_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrneh_l_s +name = lasx_xvftintrneh_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvftintrnel_l_s +name = lasx_xvftintrnel_l_s +asm-fmts = xd, xj +data-types = V4DI, V8SF + +/// lasx_xvfrintrne_s +name = lasx_xvfrintrne_s 
+asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrintrne_d +name = lasx_xvfrintrne_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfrintrz_s +name = lasx_xvfrintrz_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrintrz_d +name = lasx_xvfrintrz_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfrintrp_s +name = lasx_xvfrintrp_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrintrp_d +name = lasx_xvfrintrp_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvfrintrm_s +name = lasx_xvfrintrm_s +asm-fmts = xd, xj +data-types = V8SF, V8SF + +/// lasx_xvfrintrm_d +name = lasx_xvfrintrm_d +asm-fmts = xd, xj +data-types = V4DF, V4DF + +/// lasx_xvld +name = lasx_xvld +asm-fmts = xd, rj, si12 +data-types = V32QI, CVPOINTER, SI + +/// lasx_xvst +name = lasx_xvst +asm-fmts = xd, rj, si12 +data-types = VOID, V32QI, CVPOINTER, SI + +/// lasx_xvstelm_b +name = lasx_xvstelm_b +asm-fmts = xd, rj, si8, idx +data-types = VOID, V32QI, CVPOINTER, SI, UQI + +/// lasx_xvstelm_h +name = lasx_xvstelm_h +asm-fmts = xd, rj, si8, idx +data-types = VOID, V16HI, CVPOINTER, SI, UQI + +/// lasx_xvstelm_w +name = lasx_xvstelm_w +asm-fmts = xd, rj, si8, idx +data-types = VOID, V8SI, CVPOINTER, SI, UQI + +/// lasx_xvstelm_d +name = lasx_xvstelm_d +asm-fmts = xd, rj, si8, idx +data-types = VOID, V4DI, CVPOINTER, SI, UQI + +/// lasx_xvinsve0_w +name = lasx_xvinsve0_w +asm-fmts = xd, xj, ui3 +data-types = V8SI, V8SI, V8SI, UQI + +/// lasx_xvinsve0_d +name = lasx_xvinsve0_d +asm-fmts = xd, xj, ui2 +data-types = V4DI, V4DI, V4DI, UQI + +/// lasx_xvpickve_w +name = lasx_xvpickve_w +asm-fmts = xd, xj, ui3 +data-types = V8SI, V8SI, UQI + +/// lasx_xvpickve_d +name = lasx_xvpickve_d +asm-fmts = xd, xj, ui2 +data-types = V4DI, V4DI, UQI + +/// lasx_xvssrlrn_b_h +name = lasx_xvssrlrn_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvssrlrn_h_w +name = lasx_xvssrlrn_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, 
V8SI + +/// lasx_xvssrlrn_w_d +name = lasx_xvssrlrn_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvssrln_b_h +name = lasx_xvssrln_b_h +asm-fmts = xd, xj, xk +data-types = V32QI, V16HI, V16HI + +/// lasx_xvssrln_h_w +name = lasx_xvssrln_h_w +asm-fmts = xd, xj, xk +data-types = V16HI, V8SI, V8SI + +/// lasx_xvssrln_w_d +name = lasx_xvssrln_w_d +asm-fmts = xd, xj, xk +data-types = V8SI, V4DI, V4DI + +/// lasx_xvorn_v +name = lasx_xvorn_v +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvldi +name = lasx_xvldi +asm-fmts = xd, i13 +data-types = V4DI, HI + +/// lasx_xvldx +name = lasx_xvldx +asm-fmts = xd, rj, rk +data-types = V32QI, CVPOINTER, DI + +/// lasx_xvstx +name = lasx_xvstx +asm-fmts = xd, rj, rk +data-types = VOID, V32QI, CVPOINTER, DI + +/// lasx_xvextl_qu_du +name = lasx_xvextl_qu_du +asm-fmts = xd, xj +data-types = UV4DI, UV4DI + +/// lasx_xvinsgr2vr_w +name = lasx_xvinsgr2vr_w +asm-fmts = xd, rj, ui3 +data-types = V8SI, V8SI, SI, UQI + +/// lasx_xvinsgr2vr_d +name = lasx_xvinsgr2vr_d +asm-fmts = xd, rj, ui2 +data-types = V4DI, V4DI, DI, UQI + +/// lasx_xvreplve0_b +name = lasx_xvreplve0_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvreplve0_h +name = lasx_xvreplve0_h +asm-fmts = xd, xj +data-types = V16HI, V16HI + +/// lasx_xvreplve0_w +name = lasx_xvreplve0_w +asm-fmts = xd, xj +data-types = V8SI, V8SI + +/// lasx_xvreplve0_d +name = lasx_xvreplve0_d +asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvreplve0_q +name = lasx_xvreplve0_q +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_vext2xv_h_b +name = lasx_vext2xv_h_b +asm-fmts = xd, xj +data-types = V16HI, V32QI + +/// lasx_vext2xv_w_h +name = lasx_vext2xv_w_h +asm-fmts = xd, xj +data-types = V8SI, V16HI + +/// lasx_vext2xv_d_w +name = lasx_vext2xv_d_w +asm-fmts = xd, xj +data-types = V4DI, V8SI + +/// lasx_vext2xv_w_b +name = lasx_vext2xv_w_b +asm-fmts = xd, xj +data-types = V8SI, V32QI + +/// lasx_vext2xv_d_h +name = 
lasx_vext2xv_d_h +asm-fmts = xd, xj +data-types = V4DI, V16HI + +/// lasx_vext2xv_d_b +name = lasx_vext2xv_d_b +asm-fmts = xd, xj +data-types = V4DI, V32QI + +/// lasx_vext2xv_hu_bu +name = lasx_vext2xv_hu_bu +asm-fmts = xd, xj +data-types = V16HI, V32QI + +/// lasx_vext2xv_wu_hu +name = lasx_vext2xv_wu_hu +asm-fmts = xd, xj +data-types = V8SI, V16HI + +/// lasx_vext2xv_du_wu +name = lasx_vext2xv_du_wu +asm-fmts = xd, xj +data-types = V4DI, V8SI + +/// lasx_vext2xv_wu_bu +name = lasx_vext2xv_wu_bu +asm-fmts = xd, xj +data-types = V8SI, V32QI + +/// lasx_vext2xv_du_hu +name = lasx_vext2xv_du_hu +asm-fmts = xd, xj +data-types = V4DI, V16HI + +/// lasx_vext2xv_du_bu +name = lasx_vext2xv_du_bu +asm-fmts = xd, xj +data-types = V4DI, V32QI + +/// lasx_xvpermi_q +name = lasx_xvpermi_q +asm-fmts = xd, xj, ui8 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvpermi_d +name = lasx_xvpermi_d +asm-fmts = xd, xj, ui8 +data-types = V4DI, V4DI, USI + +/// lasx_xvperm_w +name = lasx_xvperm_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvldrepl_b +name = lasx_xvldrepl_b +asm-fmts = xd, rj, si12 +data-types = V32QI, CVPOINTER, SI + +/// lasx_xvldrepl_h +name = lasx_xvldrepl_h +asm-fmts = xd, rj, si11 +data-types = V16HI, CVPOINTER, SI + +/// lasx_xvldrepl_w +name = lasx_xvldrepl_w +asm-fmts = xd, rj, si10 +data-types = V8SI, CVPOINTER, SI + +/// lasx_xvldrepl_d +name = lasx_xvldrepl_d +asm-fmts = xd, rj, si9 +data-types = V4DI, CVPOINTER, SI + +/// lasx_xvpickve2gr_w +name = lasx_xvpickve2gr_w +asm-fmts = rd, xj, ui3 +data-types = SI, V8SI, UQI + +/// lasx_xvpickve2gr_wu +name = lasx_xvpickve2gr_wu +asm-fmts = rd, xj, ui3 +data-types = USI, V8SI, UQI + +/// lasx_xvpickve2gr_d +name = lasx_xvpickve2gr_d +asm-fmts = rd, xj, ui2 +data-types = DI, V4DI, UQI + +/// lasx_xvpickve2gr_du +name = lasx_xvpickve2gr_du +asm-fmts = rd, xj, ui2 +data-types = UDI, V4DI, UQI + +/// lasx_xvaddwev_q_d +name = lasx_xvaddwev_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, 
V4DI + +/// lasx_xvaddwev_d_w +name = lasx_xvaddwev_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvaddwev_w_h +name = lasx_xvaddwev_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvaddwev_h_b +name = lasx_xvaddwev_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvaddwev_q_du +name = lasx_xvaddwev_q_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvaddwev_d_wu +name = lasx_xvaddwev_d_wu +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, UV8SI + +/// lasx_xvaddwev_w_hu +name = lasx_xvaddwev_w_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvaddwev_h_bu +name = lasx_xvaddwev_h_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvsubwev_q_d +name = lasx_xvsubwev_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsubwev_d_w +name = lasx_xvsubwev_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvsubwev_w_h +name = lasx_xvsubwev_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvsubwev_h_b +name = lasx_xvsubwev_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvsubwev_q_du +name = lasx_xvsubwev_q_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvsubwev_d_wu +name = lasx_xvsubwev_d_wu +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, UV8SI + +/// lasx_xvsubwev_w_hu +name = lasx_xvsubwev_w_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvsubwev_h_bu +name = lasx_xvsubwev_h_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvmulwev_q_d +name = lasx_xvmulwev_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmulwev_d_w +name = lasx_xvmulwev_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvmulwev_w_h +name = lasx_xvmulwev_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvmulwev_h_b +name = lasx_xvmulwev_h_b 
+asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvmulwev_q_du +name = lasx_xvmulwev_q_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvmulwev_d_wu +name = lasx_xvmulwev_d_wu +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, UV8SI + +/// lasx_xvmulwev_w_hu +name = lasx_xvmulwev_w_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvmulwev_h_bu +name = lasx_xvmulwev_h_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvaddwod_q_d +name = lasx_xvaddwod_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvaddwod_d_w +name = lasx_xvaddwod_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvaddwod_w_h +name = lasx_xvaddwod_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvaddwod_h_b +name = lasx_xvaddwod_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvaddwod_q_du +name = lasx_xvaddwod_q_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvaddwod_d_wu +name = lasx_xvaddwod_d_wu +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, UV8SI + +/// lasx_xvaddwod_w_hu +name = lasx_xvaddwod_w_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvaddwod_h_bu +name = lasx_xvaddwod_h_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvsubwod_q_d +name = lasx_xvsubwod_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsubwod_d_w +name = lasx_xvsubwod_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvsubwod_w_h +name = lasx_xvsubwod_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvsubwod_h_b +name = lasx_xvsubwod_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvsubwod_q_du +name = lasx_xvsubwod_q_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvsubwod_d_wu +name = lasx_xvsubwod_d_wu +asm-fmts = xd, xj, xk +data-types = V4DI, 
UV8SI, UV8SI + +/// lasx_xvsubwod_w_hu +name = lasx_xvsubwod_w_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvsubwod_h_bu +name = lasx_xvsubwod_h_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvmulwod_q_d +name = lasx_xvmulwod_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvmulwod_d_w +name = lasx_xvmulwod_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V8SI, V8SI + +/// lasx_xvmulwod_w_h +name = lasx_xvmulwod_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V16HI, V16HI + +/// lasx_xvmulwod_h_b +name = lasx_xvmulwod_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V32QI, V32QI + +/// lasx_xvmulwod_q_du +name = lasx_xvmulwod_q_du +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, UV4DI + +/// lasx_xvmulwod_d_wu +name = lasx_xvmulwod_d_wu +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, UV8SI + +/// lasx_xvmulwod_w_hu +name = lasx_xvmulwod_w_hu +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, UV16HI + +/// lasx_xvmulwod_h_bu +name = lasx_xvmulwod_h_bu +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, UV32QI + +/// lasx_xvaddwev_d_wu_w +name = lasx_xvaddwev_d_wu_w +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, V8SI + +/// lasx_xvaddwev_w_hu_h +name = lasx_xvaddwev_w_hu_h +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, V16HI + +/// lasx_xvaddwev_h_bu_b +name = lasx_xvaddwev_h_bu_b +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, V32QI + +/// lasx_xvmulwev_d_wu_w +name = lasx_xvmulwev_d_wu_w +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, V8SI + +/// lasx_xvmulwev_w_hu_h +name = lasx_xvmulwev_w_hu_h +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, V16HI + +/// lasx_xvmulwev_h_bu_b +name = lasx_xvmulwev_h_bu_b +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, V32QI + +/// lasx_xvaddwod_d_wu_w +name = lasx_xvaddwod_d_wu_w +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, V8SI + +/// lasx_xvaddwod_w_hu_h +name = lasx_xvaddwod_w_hu_h +asm-fmts = xd, xj, xk +data-types = V8SI, 
UV16HI, V16HI + +/// lasx_xvaddwod_h_bu_b +name = lasx_xvaddwod_h_bu_b +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, V32QI + +/// lasx_xvmulwod_d_wu_w +name = lasx_xvmulwod_d_wu_w +asm-fmts = xd, xj, xk +data-types = V4DI, UV8SI, V8SI + +/// lasx_xvmulwod_w_hu_h +name = lasx_xvmulwod_w_hu_h +asm-fmts = xd, xj, xk +data-types = V8SI, UV16HI, V16HI + +/// lasx_xvmulwod_h_bu_b +name = lasx_xvmulwod_h_bu_b +asm-fmts = xd, xj, xk +data-types = V16HI, UV32QI, V32QI + +/// lasx_xvhaddw_q_d +name = lasx_xvhaddw_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvhaddw_qu_du +name = lasx_xvhaddw_qu_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvhsubw_q_d +name = lasx_xvhsubw_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvhsubw_qu_du +name = lasx_xvhsubw_qu_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI + +/// lasx_xvmaddwev_q_d +name = lasx_xvmaddwev_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI, V4DI + +/// lasx_xvmaddwev_d_w +name = lasx_xvmaddwev_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V8SI, V8SI + +/// lasx_xvmaddwev_w_h +name = lasx_xvmaddwev_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V16HI, V16HI + +/// lasx_xvmaddwev_h_b +name = lasx_xvmaddwev_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V32QI, V32QI + +/// lasx_xvmaddwev_q_du +name = lasx_xvmaddwev_q_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI, UV4DI + +/// lasx_xvmaddwev_d_wu +name = lasx_xvmaddwev_d_wu +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV8SI, UV8SI + +/// lasx_xvmaddwev_w_hu +name = lasx_xvmaddwev_w_hu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV16HI, UV16HI + +/// lasx_xvmaddwev_h_bu +name = lasx_xvmaddwev_h_bu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV32QI, UV32QI + +/// lasx_xvmaddwod_q_d +name = lasx_xvmaddwod_q_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI, V4DI + +/// lasx_xvmaddwod_d_w +name = 
lasx_xvmaddwod_d_w +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V8SI, V8SI + +/// lasx_xvmaddwod_w_h +name = lasx_xvmaddwod_w_h +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V16HI, V16HI + +/// lasx_xvmaddwod_h_b +name = lasx_xvmaddwod_h_b +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V32QI, V32QI + +/// lasx_xvmaddwod_q_du +name = lasx_xvmaddwod_q_du +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV4DI, UV4DI + +/// lasx_xvmaddwod_d_wu +name = lasx_xvmaddwod_d_wu +asm-fmts = xd, xj, xk +data-types = UV4DI, UV4DI, UV8SI, UV8SI + +/// lasx_xvmaddwod_w_hu +name = lasx_xvmaddwod_w_hu +asm-fmts = xd, xj, xk +data-types = UV8SI, UV8SI, UV16HI, UV16HI + +/// lasx_xvmaddwod_h_bu +name = lasx_xvmaddwod_h_bu +asm-fmts = xd, xj, xk +data-types = UV16HI, UV16HI, UV32QI, UV32QI + +/// lasx_xvmaddwev_q_du_d +name = lasx_xvmaddwev_q_du_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, UV4DI, V4DI + +/// lasx_xvmaddwev_d_wu_w +name = lasx_xvmaddwev_d_wu_w +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, UV8SI, V8SI + +/// lasx_xvmaddwev_w_hu_h +name = lasx_xvmaddwev_w_hu_h +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, UV16HI, V16HI + +/// lasx_xvmaddwev_h_bu_b +name = lasx_xvmaddwev_h_bu_b +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, UV32QI, V32QI + +/// lasx_xvmaddwod_q_du_d +name = lasx_xvmaddwod_q_du_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, UV4DI, V4DI + +/// lasx_xvmaddwod_d_wu_w +name = lasx_xvmaddwod_d_wu_w +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, UV8SI, V8SI + +/// lasx_xvmaddwod_w_hu_h +name = lasx_xvmaddwod_w_hu_h +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, UV16HI, V16HI + +/// lasx_xvmaddwod_h_bu_b +name = lasx_xvmaddwod_h_bu_b +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, UV32QI, V32QI + +/// lasx_xvrotr_b +name = lasx_xvrotr_b +asm-fmts = xd, xj, xk +data-types = V32QI, V32QI, V32QI + +/// lasx_xvrotr_h +name = lasx_xvrotr_h +asm-fmts = xd, xj, xk +data-types = V16HI, V16HI, V16HI + +/// lasx_xvrotr_w +name = 
lasx_xvrotr_w +asm-fmts = xd, xj, xk +data-types = V8SI, V8SI, V8SI + +/// lasx_xvrotr_d +name = lasx_xvrotr_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvadd_q +name = lasx_xvadd_q +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvsub_q +name = lasx_xvsub_q +asm-fmts = xd, xj, xk +data-types = V4DI, V4DI, V4DI + +/// lasx_xvaddwev_q_du_d +name = lasx_xvaddwev_q_du_d +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, V4DI + +/// lasx_xvaddwod_q_du_d +name = lasx_xvaddwod_q_du_d +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, V4DI + +/// lasx_xvmulwev_q_du_d +name = lasx_xvmulwev_q_du_d +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, V4DI + +/// lasx_xvmulwod_q_du_d +name = lasx_xvmulwod_q_du_d +asm-fmts = xd, xj, xk +data-types = V4DI, UV4DI, V4DI + +/// lasx_xvmskgez_b +name = lasx_xvmskgez_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvmsknz_b +name = lasx_xvmsknz_b +asm-fmts = xd, xj +data-types = V32QI, V32QI + +/// lasx_xvexth_h_b +name = lasx_xvexth_h_b +asm-fmts = xd, xj +data-types = V16HI, V32QI + +/// lasx_xvexth_w_h +name = lasx_xvexth_w_h +asm-fmts = xd, xj +data-types = V8SI, V16HI + +/// lasx_xvexth_d_w +name = lasx_xvexth_d_w +asm-fmts = xd, xj +data-types = V4DI, V8SI + +/// lasx_xvexth_q_d +name = lasx_xvexth_q_d +asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvexth_hu_bu +name = lasx_xvexth_hu_bu +asm-fmts = xd, xj +data-types = UV16HI, UV32QI + +/// lasx_xvexth_wu_hu +name = lasx_xvexth_wu_hu +asm-fmts = xd, xj +data-types = UV8SI, UV16HI + +/// lasx_xvexth_du_wu +name = lasx_xvexth_du_wu +asm-fmts = xd, xj +data-types = UV4DI, UV8SI + +/// lasx_xvexth_qu_du +name = lasx_xvexth_qu_du +asm-fmts = xd, xj +data-types = UV4DI, UV4DI + +/// lasx_xvrotri_b +name = lasx_xvrotri_b +asm-fmts = xd, xj, ui3 +data-types = V32QI, V32QI, UQI + +/// lasx_xvrotri_h +name = lasx_xvrotri_h +asm-fmts = xd, xj, ui4 +data-types = V16HI, V16HI, UQI + +/// lasx_xvrotri_w +name = lasx_xvrotri_w 
+asm-fmts = xd, xj, ui5 +data-types = V8SI, V8SI, UQI + +/// lasx_xvrotri_d +name = lasx_xvrotri_d +asm-fmts = xd, xj, ui6 +data-types = V4DI, V4DI, UQI + +/// lasx_xvextl_q_d +name = lasx_xvextl_q_d +asm-fmts = xd, xj +data-types = V4DI, V4DI + +/// lasx_xvsrlni_b_h +name = lasx_xvsrlni_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvsrlni_h_w +name = lasx_xvsrlni_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvsrlni_w_d +name = lasx_xvsrlni_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvsrlni_d_q +name = lasx_xvsrlni_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvsrlrni_b_h +name = lasx_xvsrlrni_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvsrlrni_h_w +name = lasx_xvsrlrni_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvsrlrni_w_d +name = lasx_xvsrlrni_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvsrlrni_d_q +name = lasx_xvsrlrni_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvssrlni_b_h +name = lasx_xvssrlni_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvssrlni_h_w +name = lasx_xvssrlni_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvssrlni_w_d +name = lasx_xvssrlni_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvssrlni_d_q +name = lasx_xvssrlni_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvssrlni_bu_h +name = lasx_xvssrlni_bu_h +asm-fmts = xd, xj, ui4 +data-types = UV32QI, UV32QI, V32QI, USI + +/// lasx_xvssrlni_hu_w +name = lasx_xvssrlni_hu_w +asm-fmts = xd, xj, ui5 +data-types = UV16HI, UV16HI, V16HI, USI + +/// lasx_xvssrlni_wu_d +name = lasx_xvssrlni_wu_d +asm-fmts = xd, xj, ui6 +data-types = UV8SI, UV8SI, V8SI, USI + +/// lasx_xvssrlni_du_q +name = lasx_xvssrlni_du_q +asm-fmts 
= xd, xj, ui7 +data-types = UV4DI, UV4DI, V4DI, USI + +/// lasx_xvssrlrni_b_h +name = lasx_xvssrlrni_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvssrlrni_h_w +name = lasx_xvssrlrni_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvssrlrni_w_d +name = lasx_xvssrlrni_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvssrlrni_d_q +name = lasx_xvssrlrni_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvssrlrni_bu_h +name = lasx_xvssrlrni_bu_h +asm-fmts = xd, xj, ui4 +data-types = UV32QI, UV32QI, V32QI, USI + +/// lasx_xvssrlrni_hu_w +name = lasx_xvssrlrni_hu_w +asm-fmts = xd, xj, ui5 +data-types = UV16HI, UV16HI, V16HI, USI + +/// lasx_xvssrlrni_wu_d +name = lasx_xvssrlrni_wu_d +asm-fmts = xd, xj, ui6 +data-types = UV8SI, UV8SI, V8SI, USI + +/// lasx_xvssrlrni_du_q +name = lasx_xvssrlrni_du_q +asm-fmts = xd, xj, ui7 +data-types = UV4DI, UV4DI, V4DI, USI + +/// lasx_xvsrani_b_h +name = lasx_xvsrani_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvsrani_h_w +name = lasx_xvsrani_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvsrani_w_d +name = lasx_xvsrani_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvsrani_d_q +name = lasx_xvsrani_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvsrarni_b_h +name = lasx_xvsrarni_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvsrarni_h_w +name = lasx_xvsrarni_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvsrarni_w_d +name = lasx_xvsrarni_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvsrarni_d_q +name = lasx_xvsrarni_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvssrani_b_h +name = lasx_xvssrani_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// 
lasx_xvssrani_h_w +name = lasx_xvssrani_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvssrani_w_d +name = lasx_xvssrani_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvssrani_d_q +name = lasx_xvssrani_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvssrani_bu_h +name = lasx_xvssrani_bu_h +asm-fmts = xd, xj, ui4 +data-types = UV32QI, UV32QI, V32QI, USI + +/// lasx_xvssrani_hu_w +name = lasx_xvssrani_hu_w +asm-fmts = xd, xj, ui5 +data-types = UV16HI, UV16HI, V16HI, USI + +/// lasx_xvssrani_wu_d +name = lasx_xvssrani_wu_d +asm-fmts = xd, xj, ui6 +data-types = UV8SI, UV8SI, V8SI, USI + +/// lasx_xvssrani_du_q +name = lasx_xvssrani_du_q +asm-fmts = xd, xj, ui7 +data-types = UV4DI, UV4DI, V4DI, USI + +/// lasx_xvssrarni_b_h +name = lasx_xvssrarni_b_h +asm-fmts = xd, xj, ui4 +data-types = V32QI, V32QI, V32QI, USI + +/// lasx_xvssrarni_h_w +name = lasx_xvssrarni_h_w +asm-fmts = xd, xj, ui5 +data-types = V16HI, V16HI, V16HI, USI + +/// lasx_xvssrarni_w_d +name = lasx_xvssrarni_w_d +asm-fmts = xd, xj, ui6 +data-types = V8SI, V8SI, V8SI, USI + +/// lasx_xvssrarni_d_q +name = lasx_xvssrarni_d_q +asm-fmts = xd, xj, ui7 +data-types = V4DI, V4DI, V4DI, USI + +/// lasx_xvssrarni_bu_h +name = lasx_xvssrarni_bu_h +asm-fmts = xd, xj, ui4 +data-types = UV32QI, UV32QI, V32QI, USI + +/// lasx_xvssrarni_hu_w +name = lasx_xvssrarni_hu_w +asm-fmts = xd, xj, ui5 +data-types = UV16HI, UV16HI, V16HI, USI + +/// lasx_xvssrarni_wu_d +name = lasx_xvssrarni_wu_d +asm-fmts = xd, xj, ui6 +data-types = UV8SI, UV8SI, V8SI, USI + +/// lasx_xvssrarni_du_q +name = lasx_xvssrarni_du_q +asm-fmts = xd, xj, ui7 +data-types = UV4DI, UV4DI, V4DI, USI + +/// lasx_xbnz_b +name = lasx_xbnz_b +asm-fmts = cd, xj +data-types = SI, UV32QI + +/// lasx_xbnz_d +name = lasx_xbnz_d +asm-fmts = cd, xj +data-types = SI, UV4DI + +/// lasx_xbnz_h +name = lasx_xbnz_h +asm-fmts = cd, xj +data-types = SI, UV16HI + +/// lasx_xbnz_v +name 
= lasx_xbnz_v +asm-fmts = cd, xj +data-types = SI, UV32QI + +/// lasx_xbnz_w +name = lasx_xbnz_w +asm-fmts = cd, xj +data-types = SI, UV8SI + +/// lasx_xbz_b +name = lasx_xbz_b +asm-fmts = cd, xj +data-types = SI, UV32QI + +/// lasx_xbz_d +name = lasx_xbz_d +asm-fmts = cd, xj +data-types = SI, UV4DI + +/// lasx_xbz_h +name = lasx_xbz_h +asm-fmts = cd, xj +data-types = SI, UV16HI + +/// lasx_xbz_v +name = lasx_xbz_v +asm-fmts = cd, xj +data-types = SI, UV32QI + +/// lasx_xbz_w +name = lasx_xbz_w +asm-fmts = cd, xj +data-types = SI, UV8SI + +/// lasx_xvfcmp_caf_d +name = lasx_xvfcmp_caf_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_caf_s +name = lasx_xvfcmp_caf_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_ceq_d +name = lasx_xvfcmp_ceq_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_ceq_s +name = lasx_xvfcmp_ceq_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cle_d +name = lasx_xvfcmp_cle_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cle_s +name = lasx_xvfcmp_cle_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_clt_d +name = lasx_xvfcmp_clt_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_clt_s +name = lasx_xvfcmp_clt_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cne_d +name = lasx_xvfcmp_cne_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cne_s +name = lasx_xvfcmp_cne_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cor_d +name = lasx_xvfcmp_cor_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cor_s +name = lasx_xvfcmp_cor_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cueq_d +name = lasx_xvfcmp_cueq_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cueq_s +name = lasx_xvfcmp_cueq_s +asm-fmts = xd, xj, xk +data-types = 
V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cule_d +name = lasx_xvfcmp_cule_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cule_s +name = lasx_xvfcmp_cule_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cult_d +name = lasx_xvfcmp_cult_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cult_s +name = lasx_xvfcmp_cult_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cun_d +name = lasx_xvfcmp_cun_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cune_d +name = lasx_xvfcmp_cune_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_cune_s +name = lasx_xvfcmp_cune_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_cun_s +name = lasx_xvfcmp_cun_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_saf_d +name = lasx_xvfcmp_saf_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_saf_s +name = lasx_xvfcmp_saf_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_seq_d +name = lasx_xvfcmp_seq_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_seq_s +name = lasx_xvfcmp_seq_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sle_d +name = lasx_xvfcmp_sle_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sle_s +name = lasx_xvfcmp_sle_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_slt_d +name = lasx_xvfcmp_slt_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_slt_s +name = lasx_xvfcmp_slt_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sne_d +name = lasx_xvfcmp_sne_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sne_s +name = lasx_xvfcmp_sne_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sor_d +name = lasx_xvfcmp_sor_d +asm-fmts = xd, xj, xk 
+data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sor_s +name = lasx_xvfcmp_sor_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sueq_d +name = lasx_xvfcmp_sueq_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sueq_s +name = lasx_xvfcmp_sueq_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sule_d +name = lasx_xvfcmp_sule_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sule_s +name = lasx_xvfcmp_sule_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sult_d +name = lasx_xvfcmp_sult_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sult_s +name = lasx_xvfcmp_sult_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sun_d +name = lasx_xvfcmp_sun_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sune_d +name = lasx_xvfcmp_sune_d +asm-fmts = xd, xj, xk +data-types = V4DI, V4DF, V4DF + +/// lasx_xvfcmp_sune_s +name = lasx_xvfcmp_sune_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvfcmp_sun_s +name = lasx_xvfcmp_sun_s +asm-fmts = xd, xj, xk +data-types = V8SI, V8SF, V8SF + +/// lasx_xvpickve_d_f +name = lasx_xvpickve_d_f +asm-fmts = xd, xj, ui2 +data-types = V4DF, V4DF, UQI + +/// lasx_xvpickve_w_f +name = lasx_xvpickve_w_f +asm-fmts = xd, xj, ui3 +data-types = V8SF, V8SF, UQI + +/// lasx_xvrepli_b +name = lasx_xvrepli_b +asm-fmts = xd, si10 +data-types = V32QI, HI + +/// lasx_xvrepli_d +name = lasx_xvrepli_d +asm-fmts = xd, si10 +data-types = V4DI, HI + +/// lasx_xvrepli_h +name = lasx_xvrepli_h +asm-fmts = xd, si10 +data-types = V16HI, HI + +/// lasx_xvrepli_w +name = lasx_xvrepli_w +asm-fmts = xd, si10 +data-types = V8SI, HI + diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h b/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h new file mode 100644 index 000000000000..f13dd803abbe --- /dev/null +++ 
b/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h @@ -0,0 +1,5342 @@ +/* + * https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/loongarch/lasxintrin.h;hb=4912418dc1b51d49aca5982c6a2061bb912b92b7 + */ + +/* LARCH Loongson ASX intrinsics include file. + + Copyright (C) 2018 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#ifndef _GCC_LOONGSON_ASXINTRIN_H +#define _GCC_LOONGSON_ASXINTRIN_H 1 + +#if defined(__loongarch_asx) + +typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); +typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); +typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); +typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); +typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); +typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); +typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); +typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); +typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); +typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); +typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); +typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); +typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32))); +typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); +typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); +typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); +typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); +typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4))); +typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); +typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); +typedef float __m256 __attribute__ ((__vector_size__ (32), + __may_alias__)); +typedef long long __m256i __attribute__ ((__vector_size__ (32), + __may_alias__)); +typedef double __m256d __attribute__ ((__vector_size__ (32), + __may_alias__)); + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsll_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsll_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsll_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsll_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsll_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsll_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsll_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsll_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvslli_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvslli_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslli_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ +#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvslli_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsra_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsra_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsra_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsra_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsra_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsra_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsra_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsra_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsrai_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsrai_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. 
*/ +#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsrai_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvsrai_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrar_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrar_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrar_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrar_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrar_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrar_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrar_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrar_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsrari_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. 
*/ +#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsrari_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsrari_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvsrari_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrl_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrl_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrl_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrl_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrl_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrl_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrl_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrl_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. 
*/ +#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsrli_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsrli_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsrli_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvsrli_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlr_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlr_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlr_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlr_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlr_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlr_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlr_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlr_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsrlri_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsrlri_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsrlri_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvsrlri_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitclr_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitclr_b ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitclr_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitclr_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitclr_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitclr_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitclr_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitclr_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvbitclri_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ +#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvbitclri_h ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ +#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvbitclri_w ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ +#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvbitclri_d ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitset_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitset_b ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitset_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitset_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitset_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitset_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitset_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitset_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvbitseti_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ +#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvbitseti_h ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ +#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvbitseti_w ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ +#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvbitseti_d ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitrev_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitrev_b ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitrev_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitrev_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitrev_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitrev_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitrev_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvbitrev_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvbitrevi_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ +#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvbitrevi_h ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ +#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvbitrevi_w ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. 
*/ +/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ +#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvbitrevi_d ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadd_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadd_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadd_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadd_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadd_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadd_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadd_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadd_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvaddi_bu ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvaddi_hu ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. 
*/ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvaddi_wu ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvaddi_du ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsub_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsub_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsub_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsub_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsub_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsub_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsub_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsub_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsubi_bu ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. 
*/ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsubi_hu ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsubi_wu ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsubi_du ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, si5. 
*/ +/* Data types in instruction templates: V32QI, V32QI, QI. */ +#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V16HI, V16HI, QI. */ +#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V8SI, V8SI, QI. */ +#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V4DI, V4DI, QI. */ +#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmax_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmax_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_bu ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ +#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_hu ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ +#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_wu ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ +#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmaxi_du ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V32QI, V32QI, QI. */ +#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V16HI, V16HI, QI. */ +#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V8SI, V8SI, QI. */ +#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V4DI, V4DI, QI. */ +#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmin_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmin_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_bu ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ +#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_hu ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ +#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_wu ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ +#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvmini_du ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvseq_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvseq_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvseq_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvseq_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvseq_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvseq_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvseq_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvseq_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V32QI, V32QI, QI. */ +#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvseqi_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V16HI, V16HI, QI. */ +#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvseqi_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V8SI, V8SI, QI. */ +#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvseqi_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V4DI, V4DI, QI. 
*/ +#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvseqi_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V32QI, V32QI, QI. */ +#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V16HI, V16HI, QI. */ +#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V8SI, V8SI, QI. 
*/ +#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V4DI, V4DI, QI. */ +#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvslt_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvslt_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V32QI, UV32QI, UQI. */ +#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_bu ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, UV16HI, UQI. 
*/ +#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_hu ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, UV8SI, UQI. */ +#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_wu ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V4DI, UV4DI, UQI. */ +#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslti_du ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V32QI, V32QI, QI. 
*/ +#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V16HI, V16HI, QI. */ +#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V8SI, V8SI, QI. */ +#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, si5. */ +/* Data types in instruction templates: V4DI, V4DI, QI. */ +#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsle_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsle_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V32QI, UV32QI, UQI. */ +#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_bu ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, UV16HI, UQI. */ +#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_hu ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, UV8SI, UQI. */ +#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_wu ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V4DI, UV4DI, UQI. */ +#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvslei_du ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ +#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_bu ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ +#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_hu ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ +#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_wu ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ +#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvsat_du ((v4u64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadda_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadda_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadda_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadda_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadda_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadda_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadda_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadda_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsadd_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsadd_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavg_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavg_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvavgr_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvavgr_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssub_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssub_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvabsd_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvabsd_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmul_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmul_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmul_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmul_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmul_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmul_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmul_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmul_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmadd_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmadd_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmadd_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmadd_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmadd_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmadd_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmadd_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmadd_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmsub_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmsub_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmsub_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmsub_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmsub_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmsub_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmsub_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmsub_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvdiv_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvdiv_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_hu_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_hu_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_wu_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_wu_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_du_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_du_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_hu_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_hu_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_wu_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_wu_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_du_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_du_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmod_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmod_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvrepl128vei_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. 
*/ +#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvrepl128vei_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui2. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) \ + ((__m256i)__builtin_lasx_xvrepl128vei_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui1. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) \ + ((__m256i)__builtin_lasx_xvrepl128vei_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickev_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickev_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickev_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickev_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickev_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickev_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickev_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickev_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickod_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickod_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickod_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickod_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickod_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickod_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpickod_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpickod_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvh_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvh_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvh_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvh_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvh_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvh_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvh_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvh_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvl_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvl_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvl_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvl_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvl_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvl_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvilvl_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvilvl_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackev_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackev_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackev_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackev_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackev_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackev_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackev_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackev_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackod_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackod_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackod_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackod_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackod_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackod_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpackod_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvpackod_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvshuf_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvshuf_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvshuf_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvshuf_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvshuf_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvshuf_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvshuf_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvshuf_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvand_v (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvand_v ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvandi_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvor_v (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvor_v ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvori_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvnor_v (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvnor_v ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvnori_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvxor_v (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvxor_v ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, ui8. 
*/ +/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ +#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvxori_b ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvbitsel_v (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvbitsel_v ((v32u8)_1, (v32u8)_2, (v32u8)_3); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, USI. */ +#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvbitseli_b ((v32u8)(_1), (v32u8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V32QI, V32QI, USI. */ +#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvshuf4i_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V16HI, V16HI, USI. */ +#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvshuf4i_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V8SI, V8SI, USI. */ +#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvshuf4i_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, rj. */ +/* Data types in instruction templates: V32QI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplgr2vr_b (int _1) +{ + return (__m256i)__builtin_lasx_xvreplgr2vr_b ((int)_1); +} + +/* Assembly instruction format: xd, rj. */ +/* Data types in instruction templates: V16HI, SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplgr2vr_h (int _1) +{ + return (__m256i)__builtin_lasx_xvreplgr2vr_h ((int)_1); +} + +/* Assembly instruction format: xd, rj. */ +/* Data types in instruction templates: V8SI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplgr2vr_w (int _1) +{ + return (__m256i)__builtin_lasx_xvreplgr2vr_w ((int)_1); +} + +/* Assembly instruction format: xd, rj. */ +/* Data types in instruction templates: V4DI, DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplgr2vr_d (long int _1) +{ + return (__m256i)__builtin_lasx_xvreplgr2vr_d ((long int)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpcnt_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvpcnt_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpcnt_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvpcnt_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpcnt_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvpcnt_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvpcnt_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvpcnt_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclo_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclo_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclo_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclo_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclo_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclo_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclo_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclo_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclz_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclz_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclz_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclz_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclz_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclz_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvclz_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvclz_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfadd_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfadd_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfadd_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfadd_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfsub_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfsub_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfsub_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfsub_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmul_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfmul_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmul_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfmul_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfdiv_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfdiv_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfdiv_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfdiv_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcvt_h_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcvt_h_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfcvt_s_d (__m256d _1, __m256d _2) +{ + return (__m256)__builtin_lasx_xvfcvt_s_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmin_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfmin_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmin_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfmin_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmina_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfmina_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmina_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfmina_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmax_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfmax_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmax_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfmax_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmaxa_s (__m256 _1, __m256 _2) +{ + return (__m256)__builtin_lasx_xvfmaxa_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmaxa_d (__m256d _1, __m256d _2) +{ + return (__m256d)__builtin_lasx_xvfmaxa_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfclass_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvfclass_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfclass_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvfclass_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfsqrt_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfsqrt_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfsqrt_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfsqrt_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrecip_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrecip_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrecip_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrecip_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrint_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrint_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrint_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrint_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrsqrt_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrsqrt_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrsqrt_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrsqrt_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvflogb_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvflogb_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvflogb_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvflogb_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfcvth_s_h (__m256i _1) +{ + return (__m256)__builtin_lasx_xvfcvth_s_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfcvth_d_s (__m256 _1) +{ + return (__m256d)__builtin_lasx_xvfcvth_d_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfcvtl_s_h (__m256i _1) +{ + return (__m256)__builtin_lasx_xvfcvtl_s_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfcvtl_d_s (__m256 _1) +{ + return (__m256d)__builtin_lasx_xvfcvtl_d_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftint_w_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftint_w_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftint_l_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftint_l_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftint_wu_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftint_wu_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: UV4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftint_lu_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftint_lu_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrz_w_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrz_w_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrz_l_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftintrz_l_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrz_wu_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrz_wu_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrz_lu_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftintrz_lu_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvffint_s_w (__m256i _1) +{ + return (__m256)__builtin_lasx_xvffint_s_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvffint_d_l (__m256i _1) +{ + return (__m256d)__builtin_lasx_xvffint_d_l ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvffint_s_wu (__m256i _1) +{ + return (__m256)__builtin_lasx_xvffint_s_wu ((v8u32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvffint_d_lu (__m256i _1) +{ + return (__m256d)__builtin_lasx_xvffint_d_lu ((v4u64)_1); +} + +/* Assembly instruction format: xd, xj, rk. */ +/* Data types in instruction templates: V32QI, V32QI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve_b (__m256i _1, int _2) +{ + return (__m256i)__builtin_lasx_xvreplve_b ((v32i8)_1, (int)_2); +} + +/* Assembly instruction format: xd, xj, rk. */ +/* Data types in instruction templates: V16HI, V16HI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve_h (__m256i _1, int _2) +{ + return (__m256i)__builtin_lasx_xvreplve_h ((v16i16)_1, (int)_2); +} + +/* Assembly instruction format: xd, xj, rk. */ +/* Data types in instruction templates: V8SI, V8SI, SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve_w (__m256i _1, int _2) +{ + return (__m256i)__builtin_lasx_xvreplve_w ((v8i32)_1, (int)_2); +} + +/* Assembly instruction format: xd, xj, rk. */ +/* Data types in instruction templates: V4DI, V4DI, SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve_d (__m256i _1, int _2) +{ + return (__m256i)__builtin_lasx_xvreplve_d ((v4i64)_1, (int)_2); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvpermi_w ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvandn_v (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvandn_v ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvneg_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvneg_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvneg_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvneg_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvneg_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvneg_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvneg_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvneg_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmuh_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmuh_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V16HI, V32QI, UQI. */ +#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsllwil_h_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V8SI, V16HI, UQI. */ +#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsllwil_w_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V4DI, V8SI, UQI. */ +#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsllwil_d_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: UV16HI, UV32QI, UQI. */ +#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvsllwil_hu_bu ((v32u8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV8SI, UV16HI, UQI. */ +#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvsllwil_wu_hu ((v16u16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV4DI, UV8SI, UQI. */ +#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvsllwil_du_wu ((v8u32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsran_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsran_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsran_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsran_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsran_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsran_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssran_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssran_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssran_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssran_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssran_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssran_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssran_bu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssran_bu_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssran_hu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssran_hu_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssran_wu_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssran_wu_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrarn_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrarn_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrarn_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrarn_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrarn_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrarn_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrarn_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrarn_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrarn_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrarn_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrarn_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrarn_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrarn_bu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrarn_bu_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrarn_hu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrarn_hu_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrarn_wu_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrarn_wu_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrln_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrln_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrln_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrln_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrln_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrln_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrln_bu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrln_bu_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrln_hu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrln_hu_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrln_wu_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrln_wu_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlrn_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlrn_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlrn_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlrn_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsrlrn_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsrlrn_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrlrn_bu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrlrn_bu_h ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrlrn_hu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrlrn_hu_w ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrlrn_wu_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrlrn_wu_d ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, ui5. 
*/ +/* Data types in instruction templates: V32QI, V32QI, V32QI, UQI. */ +#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvfrstpi_b ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, UQI. */ +#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvfrstpi_h ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfrstp_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvfrstp_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfrstp_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvfrstp_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvshuf4i_d ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvbsrl_v ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvbsll_v ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui8. 
*/ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvextrins_b ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvextrins_h ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvextrins_w ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvextrins_d ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmskltz_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvmskltz_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmskltz_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvmskltz_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmskltz_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvmskltz_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmskltz_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvmskltz_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsigncov_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsigncov_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsigncov_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsigncov_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsigncov_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsigncov_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsigncov_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsigncov_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmadd_s (__m256 _1, __m256 _2, __m256 _3) +{ + return (__m256)__builtin_lasx_xvfmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmadd_d (__m256d _1, __m256d _2, __m256d _3) +{ + return (__m256d)__builtin_lasx_xvfmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfmsub_s (__m256 _1, __m256 _2, __m256 _3) +{ + return (__m256)__builtin_lasx_xvfmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfmsub_d (__m256d _1, __m256d _2, __m256d _3) +{ + return (__m256d)__builtin_lasx_xvfmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfnmadd_s (__m256 _1, __m256 _2, __m256 _3) +{ + return (__m256)__builtin_lasx_xvfnmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfnmadd_d (__m256d _1, __m256d _2, __m256d _3) +{ + return (__m256d)__builtin_lasx_xvfnmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfnmsub_s (__m256 _1, __m256 _2, __m256 _3) +{ + return (__m256)__builtin_lasx_xvfnmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); +} + +/* Assembly instruction format: xd, xj, xk, xa. */ +/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfnmsub_d (__m256d _1, __m256d _2, __m256d _3) +{ + return (__m256d)__builtin_lasx_xvfnmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrne_w_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrne_w_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrne_l_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftintrne_l_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrp_w_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrp_w_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrp_l_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftintrp_l_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrm_w_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrm_w_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrm_l_d (__m256d _1) +{ + return (__m256i)__builtin_lasx_xvftintrm_l_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftint_w_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvftint_w_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SF, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvffint_s_l (__m256i _1, __m256i _2) +{ + return (__m256)__builtin_lasx_xvffint_s_l ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrz_w_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvftintrz_w_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrp_w_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvftintrp_w_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DF, V4DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrm_w_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvftintrm_w_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrne_w_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvftintrne_w_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftinth_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftinth_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintl_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintl_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvffinth_d_w (__m256i _1) +{ + return (__m256d)__builtin_lasx_xvffinth_d_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvffintl_d_w (__m256i _1) +{ + return (__m256d)__builtin_lasx_xvffintl_d_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrzh_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrzh_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrzl_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrzl_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrph_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrph_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrpl_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrpl_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrmh_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrmh_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrml_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrml_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrneh_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrneh_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvftintrnel_l_s (__m256 _1) +{ + return (__m256i)__builtin_lasx_xvftintrnel_l_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrintrne_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrintrne_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrintrne_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrintrne_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrintrz_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrintrz_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrintrz_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrintrz_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrintrp_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrintrp_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrintrp_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrintrp_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_xvfrintrm_s (__m256 _1) +{ + return (__m256)__builtin_lasx_xvfrintrm_s ((v8f32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_xvfrintrm_d (__m256d _1) +{ + return (__m256d)__builtin_lasx_xvfrintrm_d ((v4f64)_1); +} + +/* Assembly instruction format: xd, rj, si12. */ +/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ +#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) \ + ((__m256i)__builtin_lasx_xvld ((void *)(_1), (_2))) + +/* Assembly instruction format: xd, rj, si12. */ +/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ +#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) \ + ((void)__builtin_lasx_xvst ((v32i8)(_1), (void *)(_2), (_3))) + +/* Assembly instruction format: xd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI, UQI. */ +#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lasx_xvstelm_b ((v32i8)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: xd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V16HI, CVPOINTER, SI, UQI. */ +#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lasx_xvstelm_h ((v16i16)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: xd, rj, si8, idx. 
*/ +/* Data types in instruction templates: VOID, V8SI, CVPOINTER, SI, UQI. */ +#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lasx_xvstelm_w ((v8i32)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: xd, rj, si8, idx. */ +/* Data types in instruction templates: VOID, V4DI, CVPOINTER, SI, UQI. */ +#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ + ((void)__builtin_lasx_xvstelm_d ((v4i64)(_1), (void *)(_2), (_3), (_4))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, UQI. */ +#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) \ + ((__m256i)__builtin_lasx_xvinsve0_w ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui2. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, UQI. */ +#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) \ + ((__m256i)__builtin_lasx_xvinsve0_d ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvpickve_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui2. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. */ +#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) \ + ((__m256i)__builtin_lasx_xvpickve_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrlrn_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrlrn_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrlrn_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrlrn_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrlrn_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrlrn_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrln_b_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrln_b_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrln_h_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrln_h_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvssrln_w_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvssrln_w_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvorn_v (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvorn_v ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, i13. */ +/* Data types in instruction templates: V4DI, HI. 
*/ +#define __lasx_xvldi(/*i13*/ _1) \ + ((__m256i)__builtin_lasx_xvldi ((_1))) + +/* Assembly instruction format: xd, rj, rk. */ +/* Data types in instruction templates: V32QI, CVPOINTER, DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvldx (void * _1, long int _2) +{ + return (__m256i)__builtin_lasx_xvldx ((void *)_1, (long int)_2); +} + +/* Assembly instruction format: xd, rj, rk. */ +/* Data types in instruction templates: VOID, V32QI, CVPOINTER, DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +void __lasx_xvstx (__m256i _1, void * _2, long int _3) +{ + return (void)__builtin_lasx_xvstx ((v32i8)_1, (void *)_2, (long int)_3); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvextl_qu_du (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvextl_qu_du ((v4u64)_1); +} + +/* Assembly instruction format: xd, rj, ui3. */ +/* Data types in instruction templates: V8SI, V8SI, SI, UQI. */ +#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) \ + ((__m256i)__builtin_lasx_xvinsgr2vr_w ((v8i32)(_1), (int)(_2), (_3))) + +/* Assembly instruction format: xd, rj, ui2. */ +/* Data types in instruction templates: V4DI, V4DI, DI, UQI. */ +#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) \ + ((__m256i)__builtin_lasx_xvinsgr2vr_d ((v4i64)(_1), (long int)(_2), (_3))) + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve0_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvreplve0_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve0_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvreplve0_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve0_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvreplve0_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve0_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvreplve0_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvreplve0_q (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvreplve0_q ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_h_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_h_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_w_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_w_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_d_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_d_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: V8SI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_w_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_w_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_d_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_d_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_d_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_d_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_hu_bu (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_hu_bu ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_wu_hu (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_wu_hu ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_du_wu (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_du_wu ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_wu_bu (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_wu_bu ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_du_hu (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_du_hu ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_vext2xv_du_bu (__m256i _1) +{ + return (__m256i)__builtin_lasx_vext2xv_du_bu ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ + ((__m256i)__builtin_lasx_xvpermi_q ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui8. */ +/* Data types in instruction templates: V4DI, V4DI, USI. */ +#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) \ + ((__m256i)__builtin_lasx_xvpermi_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvperm_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvperm_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, rj, si12. */ +/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ +#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) \ + ((__m256i)__builtin_lasx_xvldrepl_b ((void *)(_1), (_2))) + +/* Assembly instruction format: xd, rj, si11. */ +/* Data types in instruction templates: V16HI, CVPOINTER, SI. 
*/ +#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) \ + ((__m256i)__builtin_lasx_xvldrepl_h ((void *)(_1), (_2))) + +/* Assembly instruction format: xd, rj, si10. */ +/* Data types in instruction templates: V8SI, CVPOINTER, SI. */ +#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) \ + ((__m256i)__builtin_lasx_xvldrepl_w ((void *)(_1), (_2))) + +/* Assembly instruction format: xd, rj, si9. */ +/* Data types in instruction templates: V4DI, CVPOINTER, SI. */ +#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) \ + ((__m256i)__builtin_lasx_xvldrepl_d ((void *)(_1), (_2))) + +/* Assembly instruction format: rd, xj, ui3. */ +/* Data types in instruction templates: SI, V8SI, UQI. */ +#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) \ + ((int)__builtin_lasx_xvpickve2gr_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: rd, xj, ui3. */ +/* Data types in instruction templates: USI, V8SI, UQI. */ +#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) \ + ((unsigned int)__builtin_lasx_xvpickve2gr_wu ((v8i32)(_1), (_2))) + +/* Assembly instruction format: rd, xj, ui2. */ +/* Data types in instruction templates: DI, V4DI, UQI. */ +#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) \ + ((long int)__builtin_lasx_xvpickve2gr_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: rd, xj, ui2. */ +/* Data types in instruction templates: UDI, V4DI, UQI. */ +#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) \ + ((unsigned long int)__builtin_lasx_xvpickve2gr_du ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_q_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_q_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_d_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_d_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_w_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_w_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_h_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_h_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_q_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_q_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_d_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_d_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_w_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_w_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwev_h_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwev_h_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_q_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_q_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_d_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_d_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_w_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_w_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_h_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_h_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_q_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_q_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_d_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_d_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_w_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_w_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_h_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_h_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_q_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_q_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_d_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_d_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_w_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_w_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsubwod_h_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsubwod_h_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_d_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_d_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_w_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_w_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V32QI, V32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_h_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_h_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_q_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_q_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_d_wu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_d_wu ((v8u32)_1, (v8u32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_w_hu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_w_hu ((v16u16)_1, (v16u16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_h_bu (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_h_bu ((v32u8)_1, (v32u8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_d_wu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_d_wu_w ((v8u32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, V16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_w_hu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_w_hu_h ((v16u16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_h_bu_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_h_bu_b ((v32u8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_d_wu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_d_wu_w ((v8u32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_w_hu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_w_hu_h ((v16u16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_h_bu_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_h_bu_b ((v32u8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_d_wu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_d_wu_w ((v8u32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_w_hu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_w_hu_h ((v16u16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_h_bu_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_h_bu_b ((v32u8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_d_wu_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_d_wu_w ((v8u32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_w_hu_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_w_hu_h ((v16u16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_h_bu_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_h_bu_b ((v32u8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhaddw_qu_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhaddw_qu_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_q_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_q_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvhsubw_qu_du (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvhsubw_qu_du ((v4u64)_1, (v4u64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_q_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_d_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_w_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_h_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_q_du (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_d_wu (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_w_hu (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_h_bu (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_q_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_d_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_w_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_h_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_q_du (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_d_wu (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_w_hu (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_h_bu (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_q_du_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_d_wu_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_w_hu_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwev_h_bu_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_q_du_d (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_d_wu_w (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_w_hu_h (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmaddwod_h_bu_b (__m256i _1, __m256i _2, __m256i _3) +{ + return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvrotr_b (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvrotr_b ((v32i8)_1, (v32i8)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvrotr_h (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvrotr_h ((v16i16)_1, (v16i16)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvrotr_w (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvrotr_w ((v8i32)_1, (v8i32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvrotr_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvrotr_d ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. 
*/ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvadd_q (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvadd_q ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvsub_q (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvsub_q ((v4i64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwev_q_du_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwev_q_du_d ((v4u64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvaddwod_q_du_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvaddwod_q_du_d ((v4u64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwev_q_du_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwev_q_du_d ((v4u64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmulwod_q_du_d (__m256i _1, __m256i _2) +{ + return (__m256i)__builtin_lasx_xvmulwod_q_du_d ((v4u64)_1, (v4i64)_2); +} + +/* Assembly instruction format: xd, xj. 
*/ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmskgez_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvmskgez_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V32QI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvmsknz_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvmsknz_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V16HI, V32QI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_h_b (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_h_b ((v32i8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V8SI, V16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_w_h (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_w_h ((v16i16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_d_w (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_d_w ((v8i32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_q_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_q_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV16HI, UV32QI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_hu_bu (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_hu_bu ((v32u8)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV8SI, UV16HI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_wu_hu (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_wu_hu ((v16u16)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV4DI, UV8SI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_du_wu (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_du_wu ((v8u32)_1); +} + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: UV4DI, UV4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvexth_qu_du (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvexth_qu_du ((v4u64)_1); +} + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V32QI, V32QI, UQI. */ +#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) \ + ((__m256i)__builtin_lasx_xvrotri_b ((v32i8)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V16HI, V16HI, UQI. */ +#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) \ + ((__m256i)__builtin_lasx_xvrotri_h ((v16i16)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V8SI, V8SI, UQI. */ +#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) \ + ((__m256i)__builtin_lasx_xvrotri_w ((v8i32)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ +#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) \ + ((__m256i)__builtin_lasx_xvrotri_d ((v4i64)(_1), (_2))) + +/* Assembly instruction format: xd, xj. */ +/* Data types in instruction templates: V4DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvextl_q_d (__m256i _1) +{ + return (__m256i)__builtin_lasx_xvextl_q_d ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. 
*/ +#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvsrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. 
*/ +#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ +#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ +#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ +#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ +#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ +#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ +#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ +#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ +#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrlrni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvsrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvsrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. 
*/ +#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvsrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvsrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvsrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvsrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvsrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvsrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ +#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. 
*/ +#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ +#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ +#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ +#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ +#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrani_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. 
*/ +#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ +#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ +#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ +#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui4. */ +/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ +#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui5. */ +/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ +#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui6. */ +/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ +#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) + +/* Assembly instruction format: xd, xj, ui7. */ +/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. 
*/ +#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ + ((__m256i)__builtin_lasx_xvssrarni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV32QI. */ +#define __lasx_xbnz_b(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbnz_b ((v32u8)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV4DI. */ +#define __lasx_xbnz_d(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbnz_d ((v4u64)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV16HI. */ +#define __lasx_xbnz_h(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbnz_h ((v16u16)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV32QI. */ +#define __lasx_xbnz_v(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbnz_v ((v32u8)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV8SI. */ +#define __lasx_xbnz_w(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbnz_w ((v8u32)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV32QI. */ +#define __lasx_xbz_b(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbz_b ((v32u8)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV4DI. */ +#define __lasx_xbz_d(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbz_d ((v4u64)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV16HI. */ +#define __lasx_xbz_h(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbz_h ((v16u16)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV32QI. */ +#define __lasx_xbz_v(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbz_v ((v32u8)(_1))) + +/* Assembly instruction format: cd, xj. */ +/* Data types in instruction templates: SI, UV8SI. 
*/ +#define __lasx_xbz_w(/*__m256i*/ _1) \ + ((int)__builtin_lasx_xbz_w ((v8u32)(_1))) + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_caf_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_caf_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_caf_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_caf_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_ceq_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_ceq_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_ceq_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_ceq_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cle_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cle_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cle_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cle_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_clt_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_clt_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_clt_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_clt_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cne_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cne_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cne_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cne_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cor_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cor_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cor_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cor_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cueq_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cueq_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cueq_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cueq_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cule_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cule_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cule_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cule_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cult_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cult_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cult_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cult_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cun_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cun_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cune_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cune_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cune_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cune_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_cun_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_cun_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_saf_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_saf_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_saf_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_saf_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_seq_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_seq_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_seq_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_seq_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sle_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sle_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sle_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sle_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_slt_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_slt_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_slt_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_slt_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sne_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sne_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sne_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sne_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sor_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sor_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sor_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sor_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sueq_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sueq_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sueq_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sueq_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sule_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sule_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sule_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sule_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sult_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sult_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sult_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sult_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sun_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sun_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V4DI, V4DF, V4DF. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sune_d (__m256d _1, __m256d _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sune_d ((v4f64)_1, (v4f64)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sune_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sune_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, xk. */ +/* Data types in instruction templates: V8SI, V8SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_xvfcmp_sun_s (__m256 _1, __m256 _2) +{ + return (__m256i)__builtin_lasx_xvfcmp_sun_s ((v8f32)_1, (v8f32)_2); +} + +/* Assembly instruction format: xd, xj, ui2. */ +/* Data types in instruction templates: V4DF, V4DF, UQI. */ +#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) \ + ((__m256d)__builtin_lasx_xvpickve_d_f ((v4f64)(_1), (_2))) + +/* Assembly instruction format: xd, xj, ui3. */ +/* Data types in instruction templates: V8SF, V8SF, UQI. */ +#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) \ + ((__m256)__builtin_lasx_xvpickve_w_f ((v8f32)(_1), (_2))) + +/* Assembly instruction format: xd, si10. */ +/* Data types in instruction templates: V32QI, HI. */ +#define __lasx_xvrepli_b(/*si10*/ _1) \ + ((__m256i)__builtin_lasx_xvrepli_b ((_1))) + +/* Assembly instruction format: xd, si10. */ +/* Data types in instruction templates: V4DI, HI. */ +#define __lasx_xvrepli_d(/*si10*/ _1) \ + ((__m256i)__builtin_lasx_xvrepli_d ((_1))) + +/* Assembly instruction format: xd, si10. */ +/* Data types in instruction templates: V16HI, HI. */ +#define __lasx_xvrepli_h(/*si10*/ _1) \ + ((__m256i)__builtin_lasx_xvrepli_h ((_1))) + +/* Assembly instruction format: xd, si10. */ +/* Data types in instruction templates: V8SI, HI. 
*/ +#define __lasx_xvrepli_w(/*si10*/ _1) \ + ((__m256i)__builtin_lasx_xvrepli_w ((_1))) + +#endif /* defined(__loongarch_asx). */ +#endif /* _GCC_LOONGSON_ASXINTRIN_H. */