From ab2c74fd7dd229d8c1ccb9957c7104ebdaf49f0f Mon Sep 17 00:00:00 2001 From: Radovan Birdic Date: Thu, 4 Apr 2019 08:59:43 +0000 Subject: [PATCH 1/3] Implement all MSA Intrinsics --- crates/core_arch/src/mips/msa.rs | 16689 +++++++++++++++++++++- crates/core_arch/src/mips/msa/macros.rs | 4347 ++++++ crates/simd-test-macro/src/lib.rs | 4 +- 3 files changed, 21005 insertions(+), 35 deletions(-) create mode 100644 crates/core_arch/src/mips/msa/macros.rs diff --git a/crates/core_arch/src/mips/msa.rs b/crates/core_arch/src/mips/msa.rs index 0299ee7c51..ad833fa265 100644 --- a/crates/core_arch/src/mips/msa.rs +++ b/crates/core_arch/src/mips/msa.rs @@ -7,56 +7,16679 @@ #[cfg(test)] use stdsimd_test::assert_instr; +use core_arch::simd::*; -types! { - /// MIPS-specific 128-bit wide vector of 16 packed `i8`. - pub struct i8x16( - i8, i8, i8, i8, i8, i8, i8, i8, - i8, i8, i8, i8, i8, i8, i8, i8, - ); -} + +#[macro_use] +mod macros; #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.mips.add.a.b"] fn msa_add_a_b(a: i8x16, b: i8x16) -> i8x16; -} + #[link_name = "llvm.mips.add.a.h"] + fn msa_add_a_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.add.a.w"] + fn msa_add_a_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.add.a.d"] + fn msa_add_a_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.adds.a.b"] + fn msa_adds_a_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.adds.a.h"] + fn msa_adds_a_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.adds.a.w"] + fn msa_adds_a_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.adds.a.d"] + fn msa_adds_a_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.adds.s.b"] + fn msa_adds_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.adds.s.h"] + fn msa_adds_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.adds.s.w"] + fn msa_adds_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.adds.s.d"] + fn msa_adds_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.adds.u.b"] + fn msa_adds_u_b(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.adds.u.h"] + fn msa_adds_u_h(a: u16x8, b: u16x8) -> u16x8; + #[link_name = "llvm.mips.adds.u.w"] + fn msa_adds_u_w(a: u32x4, b: u32x4) -> u32x4; + #[link_name = "llvm.mips.adds.u.d"] + fn msa_adds_u_d(a: u64x2, b: u64x2) -> u64x2; + #[link_name = "llvm.mips.addv.b"] + fn msa_addv_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.addv.h"] + fn msa_addv_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.addv.w"] + fn msa_addv_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.addv.d"] + fn msa_addv_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.addvi.b"] + fn msa_addvi_b(a: i8x16, b: u32) -> i8x16; + #[link_name = "llvm.mips.addvi.h"] + fn msa_addvi_h(a: i16x8, b: u32) -> i16x8; + #[link_name = "llvm.mips.addvi.w"] + fn msa_addvi_w(a: i32x4, b: u32) -> i32x4; + #[link_name = "llvm.mips.addvi.d"] + fn msa_addvi_d(a: i64x2, b: u32) -> i64x2; + #[link_name = "llvm.mips.and.v"] + fn msa_and_v(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.andi.b"] + fn msa_andi_b(a: u8x16, b: u32) -> u8x16; + #[link_name = "llvm.mips.asub.s.b"] + fn msa_asub_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.asub.s.h"] + fn msa_asub_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.asub.s.w"] + fn msa_asub_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.asub.s.d"] + fn msa_asub_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.asub.u.b"] + fn 
msa_asub_u_b(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.asub.u.h"]
+ fn msa_asub_u_h(a: u16x8, b: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.asub.u.w"]
+ fn msa_asub_u_w(a: u32x4, b: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.asub.u.d"]
+ fn msa_asub_u_d(a: u64x2, b: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.ave.s.b"]
+ fn msa_ave_s_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.ave.s.h"]
+ fn msa_ave_s_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.ave.s.w"]
+ fn msa_ave_s_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.ave.s.d"]
+ fn msa_ave_s_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.ave.u.b"]
+ fn msa_ave_u_b(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.ave.u.h"]
+ fn msa_ave_u_h(a: u16x8, b: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.ave.u.w"]
+ fn msa_ave_u_w(a: u32x4, b: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.ave.u.d"]
+ fn msa_ave_u_d(a: u64x2, b: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.aver.s.b"]
+ fn msa_aver_s_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.aver.s.h"]
+ fn msa_aver_s_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.aver.s.w"]
+ fn msa_aver_s_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.aver.s.d"]
+ fn msa_aver_s_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.aver.u.b"]
+ fn msa_aver_u_b(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.aver.u.h"]
+ fn msa_aver_u_h(a: u16x8, b: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.aver.u.w"]
+ fn msa_aver_u_w(a: u32x4, b: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.aver.u.d"]
+ fn msa_aver_u_d(a: u64x2, b: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.bclr.b"]
+ fn msa_bclr_b(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.bclr.h"]
+ fn msa_bclr_h(a: u16x8, b: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.bclr.w"]
+ fn msa_bclr_w(a: u32x4, b: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.bclr.d"]
+ fn msa_bclr_d(a: u64x2, b: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.bclri.b"]
+ fn msa_bclri_b(a: u8x16, b: i32) -> u8x16; //imm0_7
+ #[link_name = "llvm.mips.bclri.h"]
+ fn msa_bclri_h(a: u16x8, b: i32) -> u16x8; //imm0_15
+ #[link_name = "llvm.mips.bclri.w"]
+ fn msa_bclri_w(a: u32x4, b: i32) -> u32x4; //imm0_31
+ #[link_name = "llvm.mips.bclri.d"]
+ fn msa_bclri_d(a: u64x2, b: i32) -> u64x2; //imm0_63
+ #[link_name = "llvm.mips.binsl.b"]
+ fn msa_binsl_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.binsl.h"]
+ fn msa_binsl_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.binsl.w"]
+ fn msa_binsl_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.binsl.d"]
+ fn msa_binsl_d(a: u64x2, b: u64x2, c: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.binsli.b"]
+ fn msa_binsli_b(a: u8x16, b: u8x16, c: i32) -> u8x16;
+ #[link_name = "llvm.mips.binsli.h"]
+ fn msa_binsli_h(a: u16x8, b: u16x8, c: i32) -> u16x8;
+ #[link_name = "llvm.mips.binsli.w"]
+ fn msa_binsli_w(a: u32x4, b: u32x4, c: i32) -> u32x4;
+ #[link_name = "llvm.mips.binsli.d"]
+ fn msa_binsli_d(a: u64x2, b: u64x2, c: i32) -> u64x2;
+ #[link_name = "llvm.mips.binsr.b"]
+ fn msa_binsr_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.binsr.h"]
+ fn msa_binsr_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.binsr.w"]
+ fn msa_binsr_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.binsr.d"]
+ fn msa_binsr_d(a: u64x2, b: u64x2, c: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.binsri.b"]
+ fn msa_binsri_b(a: u8x16, b:
u8x16, c: i32) -> u8x16; + #[link_name = "llvm.mips.binsri.h"] + fn msa_binsri_h(a: u16x8, b: u16x8, c: i32) -> u16x8; + #[link_name = "llvm.mips.binsri.w"] + fn msa_binsri_w(a: u32x4, b: u32x4, c: i32) -> u32x4; + #[link_name = "llvm.mips.binsri.d"] + fn msa_binsri_d(a: u64x2, b: u64x2, c: i32) -> u64x2; + #[link_name = "llvm.mips.bmnz.v"] + fn msa_bmnz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + #[link_name = "llvm.mips.bmnzi.b"] + fn msa_bmnzi_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + #[link_name = "llvm.mips.bmz.v"] + fn msa_bmz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + #[link_name = "llvm.mips.bmzi.b"] + fn msa_bmzi_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + #[link_name = "llvm.mips.bneg.b"] + fn msa_bneg_b(a:u8x16, b:u8x16) -> u8x16; + #[link_name = "llvm.mips.bneg.h"] + fn msa_bneg_h(a:u16x8, b:u16x8) -> u16x8; + #[link_name = "llvm.mips.bneg.w"] + fn msa_bneg_w(a:u32x4, b:u32x4) -> u32x4; + #[link_name = "llvm.mips.bneg.d"] + fn msa_bneg_d(a:u64x2, b:u64x2) -> u64x2; + #[link_name = "llvm.mips.bnegi.b"] + fn msa_bnegi_b(a: u8x16, b:i32) -> u8x16; + #[link_name = "llvm.mips.bnegi.h"] + fn msa_bnegi_h(a: u16x8, b:i32) -> u16x8; + #[link_name = "llvm.mips.bnegi.w"] + fn msa_bnegi_w(a: u32x4, b:i32) -> u32x4; + #[link_name = "llvm.mips.bnegi.d"] + fn msa_bnegi_d(a: u64x2, b:i32) -> u64x2; + #[link_name = "llvm.mips.bnz.b"] + fn msa_bnz_b(a: u8x16) -> i32; + #[link_name = "llvm.mips.bnz.h"] + fn msa_bnz_h(a: u16x8) -> i32; + #[link_name = "llvm.mips.bnz.w"] + fn msa_bnz_w(a: u32x4) -> i32; + #[link_name = "llvm.mips.bnz.d"] + fn msa_bnz_d(a: u64x2) -> i32; + #[link_name = "llvm.mips.bnz.v"] + fn msa_bnz_v(a: u8x16) -> i32; + #[link_name = "llvm.mips.bsel.v"] + fn msa_bsel_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + #[link_name = "llvm.mips.bseli.b"] + fn msa_bseli_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + #[link_name = "llvm.mips.bset.b"] + fn msa_bset_b(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.bset.h"] + fn msa_bset_h(a: u16x8, b: u16x8) -> u16x8; + #[link_name = "llvm.mips.bset.w"] + fn msa_bset_w(a: u32x4, b: u32x4) -> u32x4; + #[link_name = "llvm.mips.bset.d"] + fn msa_bset_d(a: u64x2, b: u64x2) -> u64x2; + #[link_name = "llvm.mips.bseti.b"] + fn msa_bseti_b(a: u8x16, b: i32) -> u8x16; + #[link_name = "llvm.mips.bseti.h"] + fn msa_bseti_h(a: u16x8, b: i32) -> u16x8; + #[link_name = "llvm.mips.bseti.w"] + fn msa_bseti_w(a: u32x4, b: i32) -> u32x4; + #[link_name = "llvm.mips.bseti.d"] + fn msa_bseti_d(a: u64x2, b: i32) -> u64x2; + #[link_name = "llvm.mips.bz.b"] + fn msa_bz_b(a: u8x16) -> i32; + #[link_name = "llvm.mips.bz.h"] + fn msa_bz_h(a: u16x8) -> i32; + #[link_name = "llvm.mips.bz.w"] + fn msa_bz_w(a: u32x4) -> i32; + #[link_name = "llvm.mips.bz.d"] + fn msa_bz_d(a: u64x2) -> i32; + #[link_name = "llvm.mips.bz.v"] + fn msa_bz_v(a: u8x16) -> i32; + #[link_name = "llvm.mips.ceq.b"] + fn msa_ceq_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.ceq.h"] + fn msa_ceq_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.ceq.w"] + fn msa_ceq_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.ceq.d"] + fn msa_ceq_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.ceqi.b"] + fn msa_ceqi_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + #[link_name = "llvm.mips.ceqi.h"] + fn msa_ceqi_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + #[link_name = "llvm.mips.ceqi.w"] + fn msa_ceqi_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + #[link_name = "llvm.mips.ceqi.d"] + fn msa_ceqi_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + #[link_name = "llvm.mips.cfcmsa"] + 
fn msa_cfcmsa(a: i32) -> i32; + #[link_name = "llvm.mips.cle.s.b"] + fn msa_cle_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.cle.s.h"] + fn msa_cle_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.cle.s.w"] + fn msa_cle_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.cle.s.d"] + fn msa_cle_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.cle.u.b"] + fn msa_cle_u_b(a: u8x16, b: u8x16) -> i8x16; + #[link_name = "llvm.mips.cle.u.h"] + fn msa_cle_u_h(a: u16x8, b: u16x8) -> i16x8; + #[link_name = "llvm.mips.cle.u.w"] + fn msa_cle_u_w(a: u32x4, b: u32x4) -> i32x4; + #[link_name = "llvm.mips.cle.u.d"] + fn msa_cle_u_d(a: u64x2, b: u64x2) -> i64x2; + #[link_name = "llvm.mips.clei.s.b"] + fn msa_clei_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + #[link_name = "llvm.mips.clei.s.h"] + fn msa_clei_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + #[link_name = "llvm.mips.clei.s.w"] + fn msa_clei_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + #[link_name = "llvm.mips.clei.s.d"] + fn msa_clei_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + #[link_name = "llvm.mips.clei.u.b"] + fn msa_clei_u_b(a: u8x16, b: i32) -> i8x16; //imm0_31 + #[link_name = "llvm.mips.clei.u.h"] + fn msa_clei_u_h(a: u16x8, b: i32) -> i16x8; //imm0_31 + #[link_name = "llvm.mips.clei.u.w"] + fn msa_clei_u_w(a: u32x4, b: i32) -> i32x4; //imm0_31 + #[link_name = "llvm.mips.clei.u.d"] + fn msa_clei_u_d(a: u64x2, b: i32) -> i64x2; //imm0_31 + #[link_name = "llvm.mips.clt.s.b"] + fn msa_clt_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.clt.s.h"] + fn msa_clt_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.clt.s.w"] + fn msa_clt_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.clt.s.d"] + fn msa_clt_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.clt.u.b"] + fn msa_clt_u_b(a: u8x16, b: u8x16) -> i8x16; + #[link_name = "llvm.mips.clt.u.h"] + fn msa_clt_u_h(a: u16x8, b: u16x8) -> i16x8; + #[link_name = "llvm.mips.clt.u.w"] + fn msa_clt_u_w(a: u32x4, b: u32x4) -> i32x4; + #[link_name = "llvm.mips.clt.u.d"] + fn msa_clt_u_d(a: u64x2, b: u64x2) -> i64x2; + #[link_name = "llvm.mips.clti.s.b"] + fn msa_clti_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + #[link_name = "llvm.mips.clti.s.h"] + fn msa_clti_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + #[link_name = "llvm.mips.clti.s.w"] + fn msa_clti_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + #[link_name = "llvm.mips.clti.s.d"] + fn msa_clti_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + #[link_name = "llvm.mips.clti.u.b"] + fn msa_clti_u_b(a: u8x16, b: i32) -> i8x16; + #[link_name = "llvm.mips.clti.u.h"] + fn msa_clti_u_h(a: u16x8, b: i32) -> i16x8; + #[link_name = "llvm.mips.clti.u.w"] + fn msa_clti_u_w(a: u32x4, b: i32) -> i32x4; + #[link_name = "llvm.mips.clti.u.d"] + fn msa_clti_u_d(a: u64x2, b: i32) -> i64x2; + #[link_name = "llvm.mips.copy.s.b"] + fn msa_copy_s_b(a: i8x16, b: i32) -> i32; //imm0_15 + #[link_name = "llvm.mips.copy.s.h"] + fn msa_copy_s_h(a: i16x8, b: i32) -> i32; //imm0_7 + #[link_name = "llvm.mips.copy.s.w"] + fn msa_copy_s_w(a: i32x4, b: i32) -> i32; //imm0_3 + #[link_name = "llvm.mips.copy.s.d"] + fn msa_copy_s_d(a: i64x2, b: i32) -> i64; //imm0_1 + #[link_name = "llvm.mips.copy.u.b"] + fn msa_copy_u_b(a: i8x16, b: i32) -> u32; //imm0_15 + #[link_name = "llvm.mips.copy.u.h"] + fn msa_copy_u_h(a: i16x8, b: i32) -> u32; //imm0_7 + #[link_name = "llvm.mips.copy.u.w"] + fn msa_copy_u_w(a: i32x4, b: i32) -> u32; //imm0_3 + #[link_name = "llvm.mips.copy.u.d"] + fn 
msa_copy_u_d(a: i64x2, b: i32) -> u64; //imm0_1
+ #[link_name = "llvm.mips.ctcmsa"]
+ fn msa_ctcmsa(imm5: i32, a: i32) -> ();
+ #[link_name = "llvm.mips.div.s.b"]
+ fn msa_div_s_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.div.s.h"]
+ fn msa_div_s_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.div.s.w"]
+ fn msa_div_s_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.div.s.d"]
+ fn msa_div_s_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.div.u.b"]
+ fn msa_div_u_b(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.div.u.h"]
+ fn msa_div_u_h(a: u16x8, b: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.div.u.w"]
+ fn msa_div_u_w(a: u32x4, b: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.div.u.d"]
+ fn msa_div_u_d(a: u64x2, b: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.dotp.s.h"]
+ fn msa_dotp_s_h(a: i8x16, b: i8x16) -> i16x8;
+ #[link_name = "llvm.mips.dotp.s.w"]
+ fn msa_dotp_s_w(a: i16x8, b: i16x8) -> i32x4;
+ #[link_name = "llvm.mips.dotp.s.d"]
+ fn msa_dotp_s_d(a: i32x4, b: i32x4) -> i64x2;
+ #[link_name = "llvm.mips.dotp.u.h"]
+ fn msa_dotp_u_h(a: u8x16, b: u8x16) -> u16x8;
+ #[link_name = "llvm.mips.dotp.u.w"]
+ fn msa_dotp_u_w(a: u16x8, b: u16x8) -> u32x4;
+ #[link_name = "llvm.mips.dotp.u.d"]
+ fn msa_dotp_u_d(a: u32x4, b: u32x4) -> u64x2;
+ #[link_name = "llvm.mips.dpadd.s.h"]
+ fn msa_dpadd_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8;
+ #[link_name = "llvm.mips.dpadd.s.w"]
+ fn msa_dpadd_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4;
+ #[link_name = "llvm.mips.dpadd.s.d"]
+ fn msa_dpadd_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2;
+ #[link_name = "llvm.mips.dpadd.u.h"]
+ fn msa_dpadd_u_h(a: u16x8, b: u8x16, c: u8x16) -> u16x8;
+ #[link_name = "llvm.mips.dpadd.u.w"]
+ fn msa_dpadd_u_w(a: u32x4, b: u16x8, c: u16x8) -> u32x4;
+ #[link_name = "llvm.mips.dpadd.u.d"]
+ fn msa_dpadd_u_d(a: u64x2, b: u32x4, c: u32x4) -> u64x2;
+ #[link_name = "llvm.mips.dpsub.s.h"]
+ fn msa_dpsub_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8;
+ #[link_name = "llvm.mips.dpsub.s.w"]
+ fn msa_dpsub_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4;
+ #[link_name = "llvm.mips.dpsub.s.d"]
+ fn msa_dpsub_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2;
+ #[link_name = "llvm.mips.dpsub.u.h"]
+ fn msa_dpsub_u_h(a: i16x8, b: u8x16, c: u8x16) -> i16x8;
+ #[link_name = "llvm.mips.dpsub.u.w"]
+ fn msa_dpsub_u_w(a: i32x4, b: u16x8, c: u16x8) -> i32x4;
+ #[link_name = "llvm.mips.dpsub.u.d"]
+ fn msa_dpsub_u_d(a: i64x2, b: u32x4, c: u32x4) -> i64x2;
+ #[link_name = "llvm.mips.fadd.w"]
+ fn msa_fadd_w(a: f32x4, b: f32x4) -> f32x4;
+ #[link_name = "llvm.mips.fadd.d"]
+ fn msa_fadd_d(a: f64x2, b: f64x2) -> f64x2;
+ #[link_name = "llvm.mips.fcaf.w"]
+ fn msa_fcaf_w(a: f32x4, b: f32x4) -> i32x4;
+ #[link_name = "llvm.mips.fcaf.d"]
+ fn msa_fcaf_d(a: f64x2, b: f64x2) -> i64x2;
+ #[link_name = "llvm.mips.fceq.w"]
+ fn msa_fceq_w(a: f32x4, b: f32x4) -> i32x4;
+ #[link_name = "llvm.mips.fceq.d"]
+ fn msa_fceq_d(a: f64x2, b: f64x2) -> i64x2;
+ #[link_name = "llvm.mips.fclass.w"]
+ fn msa_fclass_w(a: f32x4) -> i32x4;
+ #[link_name = "llvm.mips.fclass.d"]
+ fn msa_fclass_d(a: f64x2) -> i64x2;
+ #[link_name = "llvm.mips.fcle.w"]
+ fn msa_fcle_w(a: f32x4, b: f32x4) -> i32x4;
+ #[link_name = "llvm.mips.fcle.d"]
+ fn msa_fcle_d(a: f64x2, b: f64x2) -> i64x2;
+ #[link_name = "llvm.mips.fclt.w"]
+ fn msa_fclt_w(a: f32x4, b: f32x4) -> i32x4;
+ #[link_name = "llvm.mips.fclt.d"]
+ fn msa_fclt_d(a: f64x2, b: f64x2) -> i64x2;
+ #[link_name = "llvm.mips.fcne.w"]
+ fn msa_fcne_w(a: f32x4, b: f32x4) -> i32x4;
#[link_name = "llvm.mips.fcne.d"] + fn msa_fcne_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fcor.w"] + fn msa_fcor_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fcor.d"] + fn msa_fcor_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fcueq.w"] + fn msa_fcueq_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fcueq.d"] + fn msa_fcueq_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fcule.w"] + fn msa_fcule_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fcule.d"] + fn msa_fcule_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fcult.w"] + fn msa_fcult_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fcult.d"] + fn msa_fcult_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fcun.w"] + fn msa_fcun_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fcun.d"] + fn msa_fcun_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fcune.w"] + fn msa_fcune_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fcune.d"] + fn msa_fcune_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fdiv.w"] + fn msa_fdiv_w(a: f32x4, b:f32x4) -> f32x4; + #[link_name = "llvm.mips.fdiv.d"] + fn msa_fdiv_d(a: f64x2, b:f64x2) -> f64x2; + // FIXME: 16-bit floats + // #[link_name = "llvm.mips.fexdo.h"] + // fn msa_fexdo_h(a: f32x4, b: f32x4) -> f16x8; + #[link_name = "llvm.mips.fexdo.w"] + fn msa_fexdo_w(a: f64x2, b: f64x2) -> f32x4; + #[link_name = "llvm.mips.fexp2.w"] + fn msa_fexp2_w(a: f32x4, b: i32x4) -> f32x4; + #[link_name = "llvm.mips.fexp2.d"] + fn msa_fexp2_d(a: f64x2, b: i64x2) -> f64x2; + #[link_name = "llvm.mips.fexupl.w"] + // FIXME: 16-bit floats + // fn msa_fexupl_w(a: f16x8) -> f32x4; + #[link_name = "llvm.mips.fexupl.d"] + fn msa_fexupl_d(a: f32x4) -> f64x2; + // FIXME: 16-bit floats + // #[link_name = "llvm.mips.fexupr.w"] + // fn msa_fexupr_w(a: f16x8) -> f32x4; + #[link_name = "llvm.mips.fexupr.d"] + fn msa_fexupr_d(a: f32x4) -> f64x2; + #[link_name = "llvm.mips.ffint.s.w"] + fn msa_ffint_s_w(a: i32x4) -> f32x4; + #[link_name = "llvm.mips.ffint.s.d"] + fn msa_ffint_s_d(a: i64x2) -> f64x2; + #[link_name = "llvm.mips.ffint.u.w"] + fn msa_ffint_u_w(a: u32x4) -> f32x4; + #[link_name = "llvm.mips.ffint.u.d"] + fn msa_ffint_u_d(a: u64x2) -> f64x2; + #[link_name = "llvm.mips.ffql.w"] + fn msa_ffql_w(a: i16x8) -> f32x4; + #[link_name = "llvm.mips.ffql.d"] + fn msa_ffql_d(a: i32x4) -> f64x2; + #[link_name = "llvm.mips.ffqr.w"] + fn msa_ffqr_w(a: i16x8) -> f32x4; + #[link_name = "llvm.mips.ffqr.d"] + fn msa_ffqr_d(a: i32x4) -> f64x2; + #[link_name = "llvm.mips.fill.b"] + fn msa_fill_b(a: i32) -> i8x16; + #[link_name = "llvm.mips.fill.h"] + fn msa_fill_h(a: i32) -> i16x8; + #[link_name = "llvm.mips.fill.w"] + fn msa_fill_w(a: i32) -> i32x4; + #[link_name = "llvm.mips.fill.d"] + fn msa_fill_d(a: i64) -> i64x2; + #[link_name = "llvm.mips.flog2.w"] + fn msa_flog2_w(a: f32x4) -> f32x4; + #[link_name = "llvm.mips.flog2.d"] + fn msa_flog2_d(a: f64x2) -> f64x2; + #[link_name = "llvm.mips.fmadd.w"] + fn msa_fmadd_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmadd.d"] + fn msa_fmadd_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2; + #[link_name = "llvm.mips.fmax.w"] + fn msa_fmax_w(a: f32x4, b: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmax.d"] + fn msa_fmax_d(a: f64x2, b: f64x2) -> f64x2; + #[link_name = "llvm.mips.fmax.a.w"] + fn msa_fmax_a_w(a: f32x4, b: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmax.a.d"] + fn msa_fmax_a_d(a: f64x2, b: f64x2) -> f64x2; + #[link_name = 
"llvm.mips.fmin.w"] + fn msa_fmin_w(a: f32x4, b: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmin.d"] + fn msa_fmin_d(a: f64x2, b: f64x2) -> f64x2; + #[link_name = "llvm.mips.fmin.a.w"] + fn msa_fmin_a_w(a: f32x4, b: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmin.a.d"] + fn msa_fmin_a_d(a: f64x2, b: f64x2) -> f64x2; + #[link_name = "llvm.mips.fmsub.w"] + fn msa_fmsub_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmsub.d"] + fn msa_fmsub_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2; + #[link_name = "llvm.mips.fmul.w"] + fn msa_fmul_w(a: f32x4, b: f32x4) -> f32x4; + #[link_name = "llvm.mips.fmul.d"] + fn msa_fmul_d(a: f64x2, b: f64x2) -> f64x2; + #[link_name = "llvm.mips.frint.w"] + fn msa_frint_w(a: f32x4) -> f32x4; + #[link_name = "llvm.mips.frint.d"] + fn msa_frint_d(a: f64x2) -> f64x2; + #[link_name = "llvm.mips.frcp.w"] + fn msa_frcp_w(a: f32x4) -> f32x4; + #[link_name = "llvm.mips.frcp.d"] + fn msa_frcp_d(a: f64x2) -> f64x2; + #[link_name = "llvm.mips.frsqrt.w"] + fn msa_frsqrt_w(a: f32x4) -> f32x4; + #[link_name = "llvm.mips.frsqrt.d"] + fn msa_frsqrt_d(a: f64x2) -> f64x2; + #[link_name = "llvm.mips.fsaf.w"] + fn msa_fsaf_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsaf.d"] + fn msa_fsaf_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fseq.w"] + fn msa_fseq_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fseq.d"] + fn msa_fseq_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsle.w"] + fn msa_fsle_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsle.d"] + fn msa_fsle_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fslt.w"] + fn msa_fslt_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fslt.d"] + fn msa_fslt_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsne.w"] + fn msa_fsne_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsne.d"] + fn msa_fsne_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsor.w"] + fn msa_fsor_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsor.d"] + fn msa_fsor_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsqrt.w"] + fn msa_fsqrt_w(a: f32x4) -> f32x4; + #[link_name = "llvm.mips.fsqrt.d"] + fn msa_fsqrt_d(a: f64x2) -> f64x2; + #[link_name = "llvm.mips.fsub.w"] + fn msa_fsub_w(a: f32x4, b: f32x4) -> f32x4; + #[link_name = "llvm.mips.fsub.d"] + fn msa_fsub_d(a: f64x2, b: f64x2) -> f64x2; + #[link_name = "llvm.mips.fsueq.w"] + fn msa_fsueq_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsueq.d"] + fn msa_fsueq_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsule.w"] + fn msa_fsule_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsule.d"] + fn msa_fsule_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsult.w"] + fn msa_fsult_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsult.d"] + fn msa_fsult_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsun.w"] + fn msa_fsun_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsun.d"] + fn msa_fsun_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.fsune.w"] + fn msa_fsune_w(a: f32x4, b: f32x4) -> i32x4; + #[link_name = "llvm.mips.fsune.d"] + fn msa_fsune_d(a: f64x2, b: f64x2) -> i64x2; + #[link_name = "llvm.mips.ftint.s.w"] + fn msa_ftint_s_w(a: f32x4) -> i32x4; + #[link_name = "llvm.mips.ftint.s.d"] + fn msa_ftint_s_d(a: f64x2) -> i64x2; + #[link_name = "llvm.mips.ftint.u.w"] + fn msa_ftint_u_w(a: f32x4) -> u32x4; + #[link_name = "llvm.mips.ftint.u.d"] + fn msa_ftint_u_d(a: f64x2) -> 
u64x2; + #[link_name = "llvm.mips.ftq.h"] + fn msa_ftq_h(a: f32x4, b: f32x4) -> i16x8; + #[link_name = "llvm.mips.ftq.w"] + fn msa_ftq_w(a: f64x2, b: f64x2) -> i32x4; + #[link_name = "llvm.mips.ftrunc.s.w"] + fn msa_ftrunc_s_w(a: f32x4) -> i32x4; + #[link_name = "llvm.mips.ftrunc.s.d"] + fn msa_ftrunc_s_d(a: f64x2) -> i64x2; + #[link_name = "llvm.mips.ftrunc.u.w"] + fn msa_ftrunc_u_w(a: f32x4) -> u32x4; + #[link_name = "llvm.mips.ftrunc.u.d"] + fn msa_ftrunc_u_d(a: f64x2) -> u64x2; + #[link_name = "llvm.mips.hadd.s.h"] + fn msa_hadd_s_h(a: i8x16, b: i8x16) -> i16x8; + #[link_name = "llvm.mips.hadd.s.w"] + fn msa_hadd_s_w(a: i16x8, b: i16x8) -> i32x4; + #[link_name = "llvm.mips.hadd.s.d"] + fn msa_hadd_s_d(a: i32x4, b: i32x4) -> i64x2; + #[link_name = "llvm.mips.hadd.u.h"] + fn msa_hadd_u_h(a: u8x16, b: u8x16) -> u16x8; + #[link_name = "llvm.mips.hadd.u.w"] + fn msa_hadd_u_w(a: u16x8, b: u16x8) -> u32x4; + #[link_name = "llvm.mips.hadd.u.d"] + fn msa_hadd_u_d(a: u32x4, b: u32x4) -> u64x2; + #[link_name = "llvm.mips.hsub.s.h"] + fn msa_hsub_s_h(a: i8x16, b: i8x16) -> i16x8; + #[link_name = "llvm.mips.hsub.s.w"] + fn msa_hsub_s_w(a: i16x8, b: i16x8) -> i32x4; + #[link_name = "llvm.mips.hsub.s.d"] + fn msa_hsub_s_d(a: i32x4, b: i32x4) -> i64x2; + #[link_name = "llvm.mips.hsub.u.h"] + fn msa_hsub_u_h(a: u8x16, b: u8x16) -> i16x8; + #[link_name = "llvm.mips.hsub.u.w"] + fn msa_hsub_u_w(a: u16x8, b: u16x8) -> i32x4; + #[link_name = "llvm.mips.hsub.u.d"] + fn msa_hsub_u_d(a: u32x4, b: u32x4) -> i64x2; + #[link_name = "llvm.mips.ilvev.b"] + fn msa_ilvev_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.ilvev.h"] + fn msa_ilvev_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.ilvev.w"] + fn msa_ilvev_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.ilvev.d"] + fn msa_ilvev_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.ilvl.b"] + fn msa_ilvl_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.ilvl.h"] + fn msa_ilvl_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.ilvl.w"] + fn msa_ilvl_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.ilvl.d"] + fn msa_ilvl_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.ilvod.b"] + fn msa_ilvod_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.ilvod.h"] + fn msa_ilvod_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.ilvod.w"] + fn msa_ilvod_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.ilvod.d"] + fn msa_ilvod_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.ilvr.b"] + fn msa_ilvr_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.ilvr.h"] + fn msa_ilvr_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.ilvr.w"] + fn msa_ilvr_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.ilvr.d"] + fn msa_ilvr_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.insert.b"] + fn msa_insert_b(a: i8x16, b: i32, c: i32) -> i8x16; //imm0_15 + #[link_name = "llvm.mips.insert.h"] + fn msa_insert_h(a: i16x8, b: i32, c: i32) -> i16x8; //imm0_7 + #[link_name = "llvm.mips.insert.w"] + fn msa_insert_w(a: i32x4, b: i32, c: i32) -> i32x4; //imm0_3 + #[link_name = "llvm.mips.insert.d"] + fn msa_insert_d(a: i64x2, b: i32, c: i64) -> i64x2; //imm0_1 + #[link_name = "llvm.mips.insve.b"] + fn msa_insve_b(a: i8x16, b: i32, c: i8x16) -> i8x16; //imm0_15 + #[link_name = "llvm.mips.insve.h"] + fn msa_insve_h(a: i16x8, b: i32, c: i16x8) -> i16x8; //imm0_7 + #[link_name = "llvm.mips.insve.w"] + fn msa_insve_w(a: i32x4, b: i32, c: i32x4) -> i32x4; 
//imm0_3 + #[link_name = "llvm.mips.insve.d"] + fn msa_insve_d(a: i64x2, b: i32, c: i64x2) -> i64x2; //imm0_1 + #[link_name = "llvm.mips.ld.b"] + fn msa_ld_b(mem_addr: *mut i8, b: i32) -> i8x16; //imm_n512_511 + #[link_name = "llvm.mips.ld.h"] + fn msa_ld_h(mem_addr: *mut i8, b: i32) -> i16x8; //imm_n1024_1022 + #[link_name = "llvm.mips.ld.w"] + fn msa_ld_w(mem_addr: *mut i8, b: i32) -> i32x4; //imm_n2048_2044 + #[link_name = "llvm.mips.ld.d"] + fn msa_ld_d(mem_addr: *mut i8, b: i32) -> i64x2; //imm_n4096_4088 + #[link_name = "llvm.mips.ldi.b"] + fn msa_ldi_b(a: i32) -> i8x16; // imm_n512_511 + #[link_name = "llvm.mips.ldi.h"] + fn msa_ldi_h(a: i32) -> i16x8; // imm_n512_511 + #[link_name = "llvm.mips.ldi.w"] + fn msa_ldi_w(a: i32) -> i32x4; // imm_n512_511 + #[link_name = "llvm.mips.ldi.d"] + fn msa_ldi_d(a: i32) -> i64x2; // imm_n512_511 + #[link_name = "llvm.mips.madd.q.h"] + fn msa_madd_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + #[link_name = "llvm.mips.madd.q.w"] + fn msa_madd_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + #[link_name = "llvm.mips.maddr.q.h"] + fn msa_maddr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + #[link_name = "llvm.mips.maddr.q.w"] + fn msa_maddr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + #[link_name = "llvm.mips.maddv.b"] + fn msa_maddv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16; + #[link_name = "llvm.mips.maddv.h"] + fn msa_maddv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + #[link_name = "llvm.mips.maddv.w"] + fn msa_maddv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + #[link_name = "llvm.mips.maddv.d"] + fn msa_maddv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2; + #[link_name = "llvm.mips.max.a.b"] + fn msa_max_a_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.max.a.h"] + fn msa_max_a_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.max.a.w"] + fn msa_max_a_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.max.a.d"] + fn msa_max_a_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.max.s.b"] + fn msa_max_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.max.s.h"] + fn msa_max_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.max.s.w"] + fn msa_max_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.max.s.d"] + fn msa_max_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.max.u.b"] + fn msa_max_u_b(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.max.u.h"] + fn msa_max_u_h(a: u16x8, b: u16x8) -> u16x8; + #[link_name = "llvm.mips.max.u.w"] + fn msa_max_u_w(a: u32x4, b: u32x4) -> u32x4; + #[link_name = "llvm.mips.max.u.d"] + fn msa_max_u_d(a: u64x2, b: u64x2) -> u64x2; + #[link_name = "llvm.mips.maxi.s.b"] + fn msa_maxi_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + #[link_name = "llvm.mips.maxi.s.h"] + fn msa_maxi_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + #[link_name = "llvm.mips.maxi.s.w"] + fn msa_maxi_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + #[link_name = "llvm.mips.maxi.s.d"] + fn msa_maxi_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + #[link_name = "llvm.mips.maxi.u.b"] + fn msa_maxi_u_b(a: u8x16, b: i32) -> u8x16; //imm0_31 + #[link_name = "llvm.mips.maxi.u.h"] + fn msa_maxi_u_h(a: u16x8, b: i32) -> u16x8; //imm0_31 + #[link_name = "llvm.mips.maxi.u.w"] + fn msa_maxi_u_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + #[link_name = "llvm.mips.maxi.u.d"] + fn msa_maxi_u_d(a: u64x2, b: i32) -> u64x2; //imm0_31 + #[link_name = "llvm.mips.min.a.b"] + fn msa_min_a_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.min.a.h"] + fn msa_min_a_h(a: i16x8, b: i16x8) -> 
i16x8; + #[link_name = "llvm.mips.min.a.w"] + fn msa_min_a_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.min.a.d"] + fn msa_min_a_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.min.s.b"] + fn msa_min_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.min.s.h"] + fn msa_min_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.min.s.w"] + fn msa_min_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.min.s.d"] + fn msa_min_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.min.u.b"] + fn msa_min_u_b(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.min.u.h"] + fn msa_min_u_h(a: u16x8, b: u16x8) -> u16x8; + #[link_name = "llvm.mips.min.u.w"] + fn msa_min_u_w(a: u32x4, b: u32x4) -> u32x4; + #[link_name = "llvm.mips.min.u.d"] + fn msa_min_u_d(a: u64x2, b: u64x2) -> u64x2; + #[link_name = "llvm.mips.mini.s.b"] + fn msa_mini_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + #[link_name = "llvm.mips.mini.s.h"] + fn msa_mini_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + #[link_name = "llvm.mips.mini.s.w"] + fn msa_mini_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + #[link_name = "llvm.mips.mini.s.d"] + fn msa_mini_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + #[link_name = "llvm.mips.mini.u.b"] + fn msa_mini_u_b(a: u8x16, b: i32) -> u8x16; //imm0_31 + #[link_name = "llvm.mips.mini.u.h"] + fn msa_mini_u_h(a: u16x8, b: i32) -> u16x8; //imm0_31 + #[link_name = "llvm.mips.mini.u.w"] + fn msa_mini_u_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + #[link_name = "llvm.mips.mini.u.d"] + fn msa_mini_u_d(a: u64x2, b: i32) -> u64x2; //imm0_31 + #[link_name = "llvm.mips.mod.s.b"] + fn msa_mod_s_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.mod.s.h"] + fn msa_mod_s_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.mod.s.w"] + fn msa_mod_s_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.mod.s.d"] + fn msa_mod_s_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.mod.u.b"] + fn msa_mod_u_b(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.mod.u.h"] + fn msa_mod_u_h(a: u16x8, b: u16x8) -> u16x8; + #[link_name = "llvm.mips.mod.u.w"] + fn msa_mod_u_w(a: u32x4, b: u32x4) -> u32x4; + #[link_name = "llvm.mips.mod.u.d"] + fn msa_mod_u_d(a: u64x2, b: u64x2) -> u64x2; + #[link_name = "llvm.mips.move.v"] + fn msa_move_v(a: i8x16) -> i8x16; + #[link_name = "llvm.mips.msub.q.h"] + fn msa_msub_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + #[link_name = "llvm.mips.msub.q.w"] + fn msa_msub_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + #[link_name = "llvm.mips.msubr.q.h"] + fn msa_msubr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + #[link_name = "llvm.mips.msubr.q.w"] + fn msa_msubr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + #[link_name = "llvm.mips.msubv.b"] + fn msa_msubv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16; + #[link_name = "llvm.mips.msubv.h"] + fn msa_msubv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + #[link_name = "llvm.mips.msubv.w"] + fn msa_msubv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + #[link_name = "llvm.mips.msubv.d"] + fn msa_msubv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2; + #[link_name = "llvm.mips.mul.q.h"] + fn msa_mul_q_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.mul.q.w"] + fn msa_mul_q_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.mulr.q.h"] + fn msa_mulr_q_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.mulr.q.w"] + fn msa_mulr_q_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.mulv.b"] + fn msa_mulv_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name 
= "llvm.mips.mulv.h"] + fn msa_mulv_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.mulv.w"] + fn msa_mulv_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.mulv.d"] + fn msa_mulv_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.nloc.b"] + fn msa_nloc_b(a: i8x16) -> i8x16; + #[link_name = "llvm.mips.nloc.h"] + fn msa_nloc_h(a: i16x8) -> i16x8; + #[link_name = "llvm.mips.nloc.w"] + fn msa_nloc_w(a: i32x4) -> i32x4; + #[link_name = "llvm.mips.nloc.d"] + fn msa_nloc_d(a: i64x2) -> i64x2; + #[link_name = "llvm.mips.nlzc.b"] + fn msa_nlzc_b(a: i8x16) -> i8x16; + #[link_name = "llvm.mips.nlzc.h"] + fn msa_nlzc_h(a: i16x8) -> i16x8; + #[link_name = "llvm.mips.nlzc.w"] + fn msa_nlzc_w(a: i32x4) -> i32x4; + #[link_name = "llvm.mips.nlzc.d"] + fn msa_nlzc_d(a: i64x2) -> i64x2; + #[link_name = "llvm.mips.nor.v"] + fn msa_nor_v(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.nori.b"] + fn msa_nori_b(a: u8x16, b: i32) -> u8x16; //imm0_255 + #[link_name = "llvm.mips.or.v"] + fn msa_or_v(a: u8x16, b: u8x16) -> u8x16; + #[link_name = "llvm.mips.ori.b"] + fn msa_ori_b(a: u8x16, b: i32) -> u8x16; //imm0_255 + #[link_name = "llvm.mips.pckev.b"] + fn msa_pckev_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.pckev.h"] + fn msa_pckev_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.pckev.w"] + fn msa_pckev_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.pckev.d"] + fn msa_pckev_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.pckod.b"] + fn msa_pckod_b(a: i8x16, b: i8x16) -> i8x16; + #[link_name = "llvm.mips.pckod.h"] + fn msa_pckod_h(a: i16x8, b: i16x8) -> i16x8; + #[link_name = "llvm.mips.pckod.w"] + fn msa_pckod_w(a: i32x4, b: i32x4) -> i32x4; + #[link_name = "llvm.mips.pckod.d"] + fn msa_pckod_d(a: i64x2, b: i64x2) -> i64x2; + #[link_name = "llvm.mips.pcnt.b"] + fn msa_pcnt_b(a: i8x16) -> i8x16; + #[link_name = "llvm.mips.pcnt.h"] + fn msa_pcnt_h(a: i16x8) -> i16x8; + #[link_name = "llvm.mips.pcnt.w"] + fn msa_pcnt_w(a: i32x4) -> i32x4; + #[link_name = "llvm.mips.pcnt.d"] + fn msa_pcnt_d(a: i64x2) -> i64x2; + #[link_name = "llvm.mips.sat.s.b"] + fn msa_sat_s_b(a: i8x16, b: i32) -> i8x16; //imm0_7 + #[link_name = "llvm.mips.sat.s.h"] + fn msa_sat_s_h(a: i16x8, b: i32) -> i16x8; //imm0_15 + #[link_name = "llvm.mips.sat.s.w"] + fn msa_sat_s_w(a: i32x4, b: i32) -> i32x4; //imm0_31 + #[link_name = "llvm.mips.sat.s.d"] + fn msa_sat_s_d(a: i64x2, b: i32) -> i64x2; //imm0_63 + #[link_name = "llvm.mips.sat.u.b"] + fn msa_sat_u_b(a: u8x16, b: i32) -> u8x16; //imm0_7 + #[link_name = "llvm.mips.sat.u.h"] + fn msa_sat_u_h(a: u16x8, b: i32) -> u16x8; //imm0_15 + #[link_name = "llvm.mips.sat.u.w"] + fn msa_sat_u_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + #[link_name = "llvm.mips.sat.u.d"] + fn msa_sat_u_d(a: u64x2, b: i32) -> u64x2; //imm0_63 + #[link_name = "llvm.mips.shf.b"] + fn msa_shf_b(a: i8x16, b: i32) -> i8x16; //imm0_255 + #[link_name = "llvm.mips.shf.h"] + fn msa_shf_h(a: i16x8, b: i32) -> i16x8; //imm0_255 + #[link_name = "llvm.mips.shf.w"] + fn msa_shf_w(a: i32x4, b: i32) -> i32x4; //imm0_255 + #[link_name = "llvm.mips.sld.b"] + fn msa_sld_b(a: i8x16, b: i8x16, c: i32) -> i8x16; + #[link_name = "llvm.mips.sld.h"] + fn msa_sld_h(a: i16x8, b: i16x8, c: i32) -> i16x8; + #[link_name = "llvm.mips.sld.w"] + fn msa_sld_w(a: i32x4, b: i32x4, c: i32) -> i32x4; + #[link_name = "llvm.mips.sld.d"] + fn msa_sld_d(a: i64x2, b: i64x2, c: i32) -> i64x2; + #[link_name = "llvm.mips.sldi.b"] + fn msa_sldi_b(a: i8x16, b: i8x16, c: i32) -> i8x16; 
//imm0_15
+ #[link_name = "llvm.mips.sldi.h"]
+ fn msa_sldi_h(a: i16x8, b: i16x8, c: i32) -> i16x8; //imm0_7
+ #[link_name = "llvm.mips.sldi.w"]
+ fn msa_sldi_w(a: i32x4, b: i32x4, c: i32) -> i32x4; //imm0_3
+ #[link_name = "llvm.mips.sldi.d"]
+ fn msa_sldi_d(a: i64x2, b: i64x2, c: i32) -> i64x2; //imm0_1
+ #[link_name = "llvm.mips.sll.b"]
+ fn msa_sll_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.sll.h"]
+ fn msa_sll_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.sll.w"]
+ fn msa_sll_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.sll.d"]
+ fn msa_sll_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.slli.b"]
+ fn msa_slli_b(a: i8x16, b: i32) -> i8x16; //imm0_7
+ #[link_name = "llvm.mips.slli.h"]
+ fn msa_slli_h(a: i16x8, b: i32) -> i16x8; //imm0_15
+ #[link_name = "llvm.mips.slli.w"]
+ fn msa_slli_w(a: i32x4, b: i32) -> i32x4; //imm0_31
+ #[link_name = "llvm.mips.slli.d"]
+ fn msa_slli_d(a: i64x2, b: i32) -> i64x2; //imm0_63
+ #[link_name = "llvm.mips.splat.b"]
+ fn msa_splat_b(a: i8x16, b: i32) -> i8x16;
+ #[link_name = "llvm.mips.splat.h"]
+ fn msa_splat_h(a: i16x8, b: i32) -> i16x8;
+ #[link_name = "llvm.mips.splat.w"]
+ fn msa_splat_w(a: i32x4, b: i32) -> i32x4;
+ #[link_name = "llvm.mips.splat.d"]
+ fn msa_splat_d(a: i64x2, b: i32) -> i64x2;
+ #[link_name = "llvm.mips.splati.b"]
+ fn msa_splati_b(a: i8x16, b: i32) -> i8x16; //imm0_15
+ #[link_name = "llvm.mips.splati.h"]
+ fn msa_splati_h(a: i16x8, b: i32) -> i16x8; //imm0_7
+ #[link_name = "llvm.mips.splati.w"]
+ fn msa_splati_w(a: i32x4, b: i32) -> i32x4; //imm0_3
+ #[link_name = "llvm.mips.splati.d"]
+ fn msa_splati_d(a: i64x2, b: i32) -> i64x2; //imm0_1
+ #[link_name = "llvm.mips.sra.b"]
+ fn msa_sra_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.sra.h"]
+ fn msa_sra_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.sra.w"]
+ fn msa_sra_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.sra.d"]
+ fn msa_sra_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.srai.b"]
+ fn msa_srai_b(a: i8x16, b: i32) -> i8x16; //imm0_7
+ #[link_name = "llvm.mips.srai.h"]
+ fn msa_srai_h(a: i16x8, b: i32) -> i16x8; //imm0_15
+ #[link_name = "llvm.mips.srai.w"]
+ fn msa_srai_w(a: i32x4, b: i32) -> i32x4; //imm0_31
+ #[link_name = "llvm.mips.srai.d"]
+ fn msa_srai_d(a: i64x2, b: i32) -> i64x2; //imm0_63
+ #[link_name = "llvm.mips.srar.b"]
+ fn msa_srar_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.srar.h"]
+ fn msa_srar_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.srar.w"]
+ fn msa_srar_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.srar.d"]
+ fn msa_srar_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.srari.b"]
+ fn msa_srari_b(a: i8x16, b: i32) -> i8x16; //imm0_7
+ #[link_name = "llvm.mips.srari.h"]
+ fn msa_srari_h(a: i16x8, b: i32) -> i16x8; //imm0_15
+ #[link_name = "llvm.mips.srari.w"]
+ fn msa_srari_w(a: i32x4, b: i32) -> i32x4; //imm0_31
+ #[link_name = "llvm.mips.srari.d"]
+ fn msa_srari_d(a: i64x2, b: i32) -> i64x2; //imm0_63
+ #[link_name = "llvm.mips.srl.b"]
+ fn msa_srl_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.srl.h"]
+ fn msa_srl_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.srl.w"]
+ fn msa_srl_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.srl.d"]
+ fn msa_srl_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.srli.b"]
+ fn msa_srli_b(a: i8x16, b: i32) -> i8x16; //imm0_7
+ #[link_name = "llvm.mips.srli.h"]
+ fn msa_srli_h(a: i16x8, b: i32) -> i16x8; //imm0_15
+ #[link_name = "llvm.mips.srli.w"]
+ fn msa_srli_w(a: i32x4, b: i32) -> i32x4; //imm0_31
+ #[link_name = "llvm.mips.srli.d"]
+ fn msa_srli_d(a: i64x2, b: i32) -> i64x2; //imm0_63
+ #[link_name = "llvm.mips.srlr.b"]
+ fn msa_srlr_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.srlr.h"]
+ fn msa_srlr_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.srlr.w"]
+ fn msa_srlr_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.srlr.d"]
+ fn msa_srlr_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.srlri.b"]
+ fn msa_srlri_b(a: i8x16, b: i32) -> i8x16; //imm0_7
+ #[link_name = "llvm.mips.srlri.h"]
+ fn msa_srlri_h(a: i16x8, b: i32) -> i16x8; //imm0_15
+ #[link_name = "llvm.mips.srlri.w"]
+ fn msa_srlri_w(a: i32x4, b: i32) -> i32x4; //imm0_31
+ #[link_name = "llvm.mips.srlri.d"]
+ fn msa_srlri_d(a: i64x2, b: i32) -> i64x2; //imm0_63
+ #[link_name = "llvm.mips.st.b"]
+ fn msa_st_b(a: i8x16, mem_addr: *mut i8, imm_s10: i32) -> (); //imm_n512_511
+ #[link_name = "llvm.mips.st.h"]
+ fn msa_st_h(a: i16x8, mem_addr: *mut i8, imm_s11: i32) -> (); //imm_n1024_1022
+ #[link_name = "llvm.mips.st.w"]
+ fn msa_st_w(a: i32x4, mem_addr: *mut i8, imm_s12: i32) -> (); //imm_n2048_2044
+ #[link_name = "llvm.mips.st.d"]
+ fn msa_st_d(a: i64x2, mem_addr: *mut i8, imm_s13: i32) -> (); //imm_n4096_4088
+ #[link_name = "llvm.mips.subs.s.b"]
+ fn msa_subs_s_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.subs.s.h"]
+ fn msa_subs_s_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.subs.s.w"]
+ fn msa_subs_s_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.subs.s.d"]
+ fn msa_subs_s_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.subs.u.b"]
+ fn msa_subs_u_b(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.subs.u.h"]
+ fn msa_subs_u_h(a: u16x8, b: u16x8) -> u16x8;
+ #[link_name = "llvm.mips.subs.u.w"]
+ fn msa_subs_u_w(a: u32x4, b: u32x4) -> u32x4;
+ #[link_name = "llvm.mips.subs.u.d"]
+ fn msa_subs_u_d(a: u64x2, b: u64x2) -> u64x2;
+ #[link_name = "llvm.mips.subsus.u.b"]
+ fn msa_subsus_u_b(a: u8x16, b: i8x16) -> u8x16;
+ #[link_name = "llvm.mips.subsus.u.h"]
+ fn msa_subsus_u_h(a: u16x8, b: i16x8) -> u16x8;
+ #[link_name = "llvm.mips.subsus.u.w"]
+ fn msa_subsus_u_w(a: u32x4, b: i32x4) -> u32x4;
+ #[link_name = "llvm.mips.subsus.u.d"]
+ fn msa_subsus_u_d(a: u64x2, b: i64x2) -> u64x2;
+ #[link_name = "llvm.mips.subsuu.s.b"]
+ fn msa_subsuu_s_b(a: u8x16, b: u8x16) -> i8x16;
+ #[link_name = "llvm.mips.subsuu.s.h"]
+ fn msa_subsuu_s_h(a: u16x8, b: u16x8) -> i16x8;
+ #[link_name = "llvm.mips.subsuu.s.w"]
+ fn msa_subsuu_s_w(a: u32x4, b: u32x4) -> i32x4;
+ #[link_name = "llvm.mips.subsuu.s.d"]
+ fn msa_subsuu_s_d(a: u64x2, b: u64x2) -> i64x2;
+ #[link_name = "llvm.mips.subv.b"]
+ fn msa_subv_b(a: i8x16, b: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.subv.h"]
+ fn msa_subv_h(a: i16x8, b: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.subv.w"]
+ fn msa_subv_w(a: i32x4, b: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.subv.d"]
+ fn msa_subv_d(a: i64x2, b: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.subvi.b"]
+ fn msa_subvi_b(a: i8x16, b: i32) -> i8x16;
+ #[link_name = "llvm.mips.subvi.h"]
+ fn msa_subvi_h(a: i16x8, b: i32) -> i16x8;
+ #[link_name = "llvm.mips.subvi.w"]
+ fn msa_subvi_w(a: i32x4, b: i32) -> i32x4;
+ #[link_name = "llvm.mips.subvi.d"]
+ fn msa_subvi_d(a: i64x2, b: i32) -> i64x2;
+ #[link_name = "llvm.mips.vshf.b"]
+ fn msa_vshf_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16;
+ #[link_name = "llvm.mips.vshf.h"]
+ fn msa_vshf_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8;
+ #[link_name = "llvm.mips.vshf.w"]
+ fn msa_vshf_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4;
+ #[link_name = "llvm.mips.vshf.d"]
+ fn msa_vshf_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2;
+ #[link_name = "llvm.mips.xor.v"]
+ fn msa_xor_v(a: u8x16, b: u8x16) -> u8x16;
+ #[link_name = "llvm.mips.xori.b"]
+ fn msa_xori_b(a: u8x16, b: i32) -> u8x16; //imm0_255
+ }

/// Vector Add Absolute Values.
///
-/// Adds the absolute values of the elements in `a` and `b` into the result
-/// vector.
+/// The absolute values of the elements in vector `a` (sixteen signed 8-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (sixteen signed 8-bit integer numbers).
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
#[inline]
#[target_feature(enable = "msa")]
#[cfg_attr(test, assert_instr(add_a.b))]
-pub unsafe fn __msa_add_a_b(a: i8x16, b: i8x16) -> i8x16 {
+unsafe fn __msa_add_a_b(a: i8x16, b: i8x16) -> i8x16 {
 msa_add_a_b(a, b)
}

-#[cfg(test)]
-mod tests {
- use crate::core_arch::mips64::msa;
- use crate::simd::*;
- use stdsimd_test::simd_test;
+/// Vector Add Absolute Values
+///
+/// The absolute values of the elements in vector `a` (eight signed 16-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (eight signed 16-bit integer numbers).
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(add_a.h))]
+unsafe fn __msa_add_a_h(a: i16x8, b: i16x8) -> i16x8 {
+ msa_add_a_h(a, b)
+}

- #[simd_test(enable = "msa")]
- unsafe fn __msa_add_a_b() {
- #[rustfmt::skip]
- let a = i8x16(
- 1, 2, 3, 4,
- 1, 2, 3, 4,
- 1, 2, 3, 4,
- 1, 2, 3, 4,
- );
- #[rustfmt::skip]
- let b = i8x16(
- -4, -3, -2, -1,
- -4, -3, -2, -1,
- -4, -3, -2, -1,
- -4, -3, -2, -1,
+/// Vector Add Absolute Values
+///
+/// The absolute values of the elements in vector `a` (four signed 32-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (four signed 32-bit integer numbers).
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(add_a.w))]
+unsafe fn __msa_add_a_w(a: i32x4, b: i32x4) -> i32x4 {
+ msa_add_a_w(a, b)
+}
+
+/// Vector Add Absolute Values
+///
+/// The absolute values of the elements in vector `a` (two signed 64-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (two signed 64-bit integer numbers).
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(add_a.d))]
+unsafe fn __msa_add_a_d(a: i64x2, b: i64x2) -> i64x2 {
+ msa_add_a_d(a, b)
+}
+
+/// Vector Saturated Add of Absolute Values
+///
+/// The absolute values of the elements in vector `a` (sixteen signed 8-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (sixteen signed 8-bit integer numbers).
+/// The saturated signed result is written to vector (sixteen signed 8-bit integer numbers).
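+///
+/// # Examples
+///
+/// A minimal usage sketch with hypothetical values. It assumes an
+/// MSA-enabled MIPS target and the tuple-style `i8x16` constructor used
+/// by this module's tests, so it is not compiled or run here:
+///
+/// ```ignore
+/// let a = i8x16(-100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+/// let b = i8x16(100, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+/// // lane 0: |-100| + |100| = 200 exceeds i8::max_value(),
+/// // so it saturates to 127; lane 1: |1| + |2| = 3
+/// let r = __msa_adds_a_b(a, b);
+/// ```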
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_a.b))]
+unsafe fn __msa_adds_a_b(a: i8x16, b: i8x16) -> i8x16 {
+ msa_adds_a_b(a, b)
+}
+
+/// Vector Saturated Add of Absolute Values
+///
+/// The absolute values of the elements in vector `a` (eight signed 16-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (eight signed 16-bit integer numbers).
+/// The saturated signed result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_a.h))]
+unsafe fn __msa_adds_a_h(a: i16x8, b: i16x8) -> i16x8 {
+ msa_adds_a_h(a, b)
+}
+
+/// Vector Saturated Add of Absolute Values
+///
+/// The absolute values of the elements in vector `a` (four signed 32-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (four signed 32-bit integer numbers).
+/// The saturated signed result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_a.w))]
+unsafe fn __msa_adds_a_w(a: i32x4, b: i32x4) -> i32x4 {
+ msa_adds_a_w(a, b)
+}
+
+/// Vector Saturated Add of Absolute Values
+///
+/// The absolute values of the elements in vector `a` (two signed 64-bit integer numbers)
+/// are added to the absolute values of the elements in vector `b` (two signed 64-bit integer numbers).
+/// The saturated signed result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_a.d))]
+unsafe fn __msa_adds_a_d(a: i64x2, b: i64x2) -> i64x2 {
+ msa_adds_a_d(a, b)
+}
+
+/// Vector Signed Saturated Add of Signed Values
+///
+/// The elements in vector `a` (sixteen signed 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen signed 8-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_s.b))]
+unsafe fn __msa_adds_s_b(a: i8x16, b: i8x16) -> i8x16 {
+ msa_adds_s_b(a, b)
+}
+
+/// Vector Signed Saturated Add of Signed Values
+///
+/// The elements in vector `a` (eight signed 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight signed 16-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_s.h))]
+unsafe fn __msa_adds_s_h(a: i16x8, b: i16x8) -> i16x8 {
+ msa_adds_s_h(a, b)
+}
+
+/// Vector Signed Saturated Add of Signed Values
+///
+/// The elements in vector `a` (four signed 32-bit integer numbers)
+/// are added to the elements in vector `b` (four signed 32-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (four signed 32-bit integer numbers).
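+///
+/// # Examples
+///
+/// A hypothetical sketch of the saturating behaviour (assumes an
+/// MSA-enabled MIPS target, so it is not compiled or run here):
+///
+/// ```ignore
+/// let a = i32x4(i32::max_value(), 1, 2, 3);
+/// let b = i32x4(1, 1, 2, 3);
+/// // lane 0 would overflow, so it clamps to i32::max_value();
+/// // the remaining lanes are 2, 4, 6
+/// let r = __msa_adds_s_w(a, b);
+/// ```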
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_s.w))]
+unsafe fn __msa_adds_s_w(a: i32x4, b: i32x4) -> i32x4 {
+ msa_adds_s_w(a, b)
+}
+
+/// Vector Signed Saturated Add of Signed Values
+///
+/// The elements in vector `a` (two signed 64-bit integer numbers)
+/// are added to the elements in vector `b` (two signed 64-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_s.d))]
+unsafe fn __msa_adds_s_d(a: i64x2, b: i64x2) -> i64x2 {
+ msa_adds_s_d(a, b)
+}
+
+/// Vector Unsigned Saturated Add of Unsigned Values
+///
+/// The elements in vector `a` (sixteen unsigned 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen unsigned 8-bit integer numbers).
+/// Unsigned arithmetic is performed and overflows clamp to the largest
+/// representable unsigned values before writing the result to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_u.b))]
+unsafe fn __msa_adds_u_b(a: u8x16, b: u8x16) -> u8x16 {
+ msa_adds_u_b(a, b)
+}
+
+/// Vector Unsigned Saturated Add of Unsigned Values
+///
+/// The elements in vector `a` (eight unsigned 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight unsigned 16-bit integer numbers).
+/// Unsigned arithmetic is performed and overflows clamp to the largest
+/// representable unsigned values before writing the result to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_u.h))]
+unsafe fn __msa_adds_u_h(a: u16x8, b: u16x8) -> u16x8 {
+ msa_adds_u_h(a, b)
+}
+
+/// Vector Unsigned Saturated Add of Unsigned Values
+///
+/// The elements in vector `a` (four unsigned 32-bit integer numbers)
+/// are added to the elements in vector `b` (four unsigned 32-bit integer numbers).
+/// Unsigned arithmetic is performed and overflows clamp to the largest
+/// representable unsigned values before writing the result to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_u.w))]
+unsafe fn __msa_adds_u_w(a: u32x4, b: u32x4) -> u32x4 {
+ msa_adds_u_w(a, b)
+}
+
+/// Vector Unsigned Saturated Add of Unsigned Values
+///
+/// The elements in vector `a` (two unsigned 64-bit integer numbers)
+/// are added to the elements in vector `b` (two unsigned 64-bit integer numbers).
+/// Unsigned arithmetic is performed and overflows clamp to the largest
+/// representable unsigned values before writing the result to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(adds_u.d))]
+unsafe fn __msa_adds_u_d(a: u64x2, b: u64x2) -> u64x2 {
+ msa_adds_u_d(a, b)
+}
+
+/// Vector Add
+///
+/// The elements in vector `a` (sixteen signed 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen signed 8-bit integer numbers).
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
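+///
+/// # Examples
+///
+/// A hypothetical sketch (assumes an MSA-enabled MIPS target, so it is
+/// not compiled or run here). Unlike `__msa_adds_s_b`, the addition is
+/// modular, so overflow wraps around instead of saturating:
+///
+/// ```ignore
+/// let a = i8x16(127, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+/// let b = i8x16(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
+/// // lane 0 wraps: 127 + 1 == -128
+/// let r = __msa_addv_b(a, b);
+/// ```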
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addv.b))]
+unsafe fn __msa_addv_b(a: i8x16, b: i8x16) -> i8x16 {
+ msa_addv_b(a, b)
+}
+
+/// Vector Add
+///
+/// The elements in vector `a` (eight signed 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight signed 16-bit integer numbers).
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addv.h))]
+unsafe fn __msa_addv_h(a: i16x8, b: i16x8) -> i16x8 {
+ msa_addv_h(a, b)
+}
+
+/// Vector Add
+///
+/// The elements in vector `a` (four signed 32-bit integer numbers)
+/// are added to the elements in vector `b` (four signed 32-bit integer numbers).
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addv.w))]
+unsafe fn __msa_addv_w(a: i32x4, b: i32x4) -> i32x4 {
+ msa_addv_w(a, b)
+}
+
+/// Vector Add
+///
+/// The elements in vector `a` (two signed 64-bit integer numbers)
+/// are added to the elements in vector `b` (two signed 64-bit integer numbers).
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addv.d))]
+unsafe fn __msa_addv_d(a: i64x2, b: i64x2) -> i64x2 {
+ msa_addv_d(a, b)
+}
+
+/// Immediate Add
+///
+/// The 5-bit immediate unsigned value `imm5` is added to the elements
+/// in vector `a` (sixteen signed 8-bit integer numbers).
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addvi.b, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_addvi_b(a: i8x16, imm5: u32) -> i8x16 {
+ macro_rules! call {
+ ($imm5:expr) => {
+ msa_addvi_b(a, $imm5)
+ };
+ }
+ constify_imm5!(imm5, call)
+}
+
+/// Immediate Add
+///
+/// The 5-bit immediate unsigned value `imm5` is added to the elements
+/// in vector `a` (eight signed 16-bit integer numbers).
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addvi.h, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_addvi_h(a: i16x8, imm5: u32) -> i16x8 {
+ macro_rules! call {
+ ($imm5:expr) => {
+ msa_addvi_h(a, $imm5)
+ };
+ }
+ constify_imm5!(imm5, call)
+}
+
+/// Immediate Add
+///
+/// The 5-bit immediate unsigned value `imm5` is added to the elements
+/// in vector `a` (four signed 32-bit integer numbers).
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addvi.w, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_addvi_w(a: i32x4, imm5: u32) -> i32x4 {
+ macro_rules! call {
+ ($imm5:expr) => {
+ msa_addvi_w(a, $imm5)
+ };
+ }
+ constify_imm5!(imm5, call)
+}
+
+/// Immediate Add
+///
+/// The 5-bit immediate unsigned value `imm5` is added to the elements
+/// in vector `a` (two signed 64-bit integer numbers).
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(addvi.d, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_addvi_d(a: i64x2, imm5: u32) -> i64x2 {
+ macro_rules! call {
+
+/// Vector Logical And
+///
+/// Each bit of vector `a` (sixteen unsigned 8-bit integer numbers)
+/// is combined with the corresponding bit of vector `b` (sixteen unsigned 8-bit integer numbers)
+/// in a bitwise logical AND operation.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(and.v))]
+unsafe fn __msa_and_v(a: u8x16, b: u8x16) -> u8x16 {
+    msa_and_v(a, b)
+}
+
+/// Immediate Logical And
+///
+/// Each byte element of vector `a` (sixteen unsigned 8-bit integer numbers)
+/// is combined with the 8-bit immediate imm8 in a bitwise logical AND operation.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(andi.b, imm8 = 0b10010111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_andi_b(a: u8x16, imm8: u32) -> u8x16 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_andi_b(a, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// Vector Absolute Values of Signed Subtract
+///
+/// The signed elements in vector `b` (sixteen signed 8-bit integer numbers)
+/// are subtracted from the signed elements in vector `a` (sixteen signed 8-bit integer numbers).
+/// The absolute value of the signed result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_s.b))]
+unsafe fn __msa_asub_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_asub_s_b(a, b)
+}
+
+/// Vector Absolute Values of Signed Subtract
+///
+/// The signed elements in vector `b` (eight signed 16-bit integer numbers)
+/// are subtracted from the signed elements in vector `a` (eight signed 16-bit integer numbers).
+/// The absolute value of the signed result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_s.h))]
+unsafe fn __msa_asub_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_asub_s_h(a, b)
+}
+
+/// Vector Absolute Values of Signed Subtract
+///
+/// The signed elements in vector `b` (four signed 32-bit integer numbers)
+/// are subtracted from the signed elements in vector `a` (four signed 32-bit integer numbers).
+/// The absolute value of the signed result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_s.w))]
+unsafe fn __msa_asub_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_asub_s_w(a, b)
+}
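+
+// Illustrative scalar sketch (not part of this patch) of `asub_s`/`asub_u`:
+// the absolute difference of two lanes, computed in a wider type so the
+// extreme case |i8::MIN - i8::MAX| is well defined before truncation:
+//
+//     fn asub_s_b_lane(a: i8, b: i8) -> i8 {
+//         (a as i16 - b as i16).unsigned_abs() as i8 // wraps back into the lane width
+//     }
+//
+//     fn asub_u_b_lane(a: u8, b: u8) -> u8 {
+//         a.abs_diff(b)
+//     }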
+
+/// Vector Absolute Values of Signed Subtract
+///
+/// The signed elements in vector `b` (two signed 64-bit integer numbers)
+/// are subtracted from the signed elements in vector `a` (two signed 64-bit integer numbers).
+/// The absolute value of the signed result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_s.d))]
+unsafe fn __msa_asub_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_asub_s_d(a, b)
+}
+
+/// Vector Absolute Values of Unsigned Subtract
+///
+/// The unsigned elements in vector `b` (sixteen unsigned 8-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The absolute value of the unsigned result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_u.b))]
+unsafe fn __msa_asub_u_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_asub_u_b(a, b)
+}
+
+/// Vector Absolute Values of Unsigned Subtract
+///
+/// The unsigned elements in vector `b` (eight unsigned 16-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (eight unsigned 16-bit integer numbers).
+/// The absolute value of the unsigned result is written to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_u.h))]
+unsafe fn __msa_asub_u_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_asub_u_h(a, b)
+}
+
+/// Vector Absolute Values of Unsigned Subtract
+///
+/// The unsigned elements in vector `b` (four unsigned 32-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (four unsigned 32-bit integer numbers).
+/// The absolute value of the unsigned result is written to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_u.w))]
+unsafe fn __msa_asub_u_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_asub_u_w(a, b)
+}
+
+/// Vector Absolute Values of Unsigned Subtract
+///
+/// The unsigned elements in vector `b` (two unsigned 64-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (two unsigned 64-bit integer numbers).
+/// The absolute value of the unsigned result is written to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(asub_u.d))]
+unsafe fn __msa_asub_u_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_asub_u_d(a, b)
+}
+
+/// Vector Signed Average
+///
+/// The elements in vector `a` (sixteen signed 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen signed 8-bit integer numbers).
+/// The addition is done signed with full precision, i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_s.b))]
+unsafe fn __msa_ave_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_ave_s_b(a, b)
+}
+
+/// Vector Signed Average
+///
+/// The elements in vector `a` (eight signed 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight signed 16-bit integer numbers).
+/// The addition is done signed with full precision, i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_s.h))]
+unsafe fn __msa_ave_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_ave_s_h(a, b)
+}
+
+/// Vector Signed Average
+///
+/// The elements in vector `a` (four signed 32-bit integer numbers)
+/// are added to the elements in vector `b` (four signed 32-bit integer numbers).
+/// The addition is done signed with full precision, i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_s.w))]
+unsafe fn __msa_ave_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_ave_s_w(a, b)
+}
+
+/// Vector Signed Average
+///
+/// The elements in vector `a` (two signed 64-bit integer numbers)
+/// are added to the elements in vector `b` (two signed 64-bit integer numbers).
+/// The addition is done signed with full precision, i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_s.d))]
+unsafe fn __msa_ave_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_ave_s_d(a, b)
+}
+
+/// Vector Unsigned Average
+///
+/// The elements in vector `a` (sixteen unsigned 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen unsigned 8-bit integer numbers).
+/// The addition is done unsigned with full precision, i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_u.b))]
+unsafe fn __msa_ave_u_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_ave_u_b(a, b)
+}
+
+/// Vector Unsigned Average
+///
+/// The elements in vector `a` (eight unsigned 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight unsigned 16-bit integer numbers).
+/// The addition is done unsigned with full precision, i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_u.h))]
+unsafe fn __msa_ave_u_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_ave_u_h(a, b)
+}
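+
+// Illustrative scalar sketch (not part of this patch) of the averaging ops:
+// `ave` adds with full precision and shifts right by one (truncating), while
+// `aver` (further below) adds 1 first so the result is rounded:
+//
+//     fn ave_s_b_lane(a: i8, b: i8) -> i8 {
+//         ((a as i16 + b as i16) >> 1) as i8 // arithmetic shift, no overflow in i16
+//     }
+//
+//     fn aver_u_b_lane(a: u8, b: u8) -> u8 {
+//         ((a as u16 + b as u16 + 1) >> 1) as u8 // logical shift with rounding
+//     }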
+
+/// Vector Unsigned Average
+///
+/// The elements in vector `a` (four unsigned 32-bit integer numbers)
+/// are added to the elements in vector `b` (four unsigned 32-bit integer numbers).
+/// The addition is done unsigned with full precision, i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_u.w))]
+unsafe fn __msa_ave_u_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_ave_u_w(a, b)
+}
+
+/// Vector Unsigned Average
+///
+/// The elements in vector `a` (two unsigned 64-bit integer numbers)
+/// are added to the elements in vector `b` (two unsigned 64-bit integer numbers).
+/// The addition is done unsigned with full precision, i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ave_u.d))]
+unsafe fn __msa_ave_u_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_ave_u_d(a, b)
+}
+
+/// Vector Signed Average Rounded
+///
+/// The elements in vector `a` (sixteen signed 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen signed 8-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done signed with full precision,
+/// i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_s.b))]
+unsafe fn __msa_aver_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_aver_s_b(a, b)
+}
+
+/// Vector Signed Average Rounded
+///
+/// The elements in vector `a` (eight signed 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight signed 16-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done signed with full precision,
+/// i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_s.h))]
+unsafe fn __msa_aver_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_aver_s_h(a, b)
+}
+
+/// Vector Signed Average Rounded
+///
+/// The elements in vector `a` (four signed 32-bit integer numbers)
+/// are added to the elements in vector `b` (four signed 32-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done signed with full precision,
+/// i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_s.w))]
+unsafe fn __msa_aver_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_aver_s_w(a, b)
+}
+
+/// Vector Signed Average Rounded
+///
+/// The elements in vector `a` (two signed 64-bit integer numbers)
+/// are added to the elements in vector `b` (two signed 64-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done signed with full precision,
+/// i.e. the result has one extra bit.
+/// Signed division by 2 (or arithmetic shift right by one bit) is performed before
+/// writing the result to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_s.d))]
+unsafe fn __msa_aver_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_aver_s_d(a, b)
+}
+
+/// Vector Unsigned Average Rounded
+///
+/// The elements in vector `a` (sixteen unsigned 8-bit integer numbers)
+/// are added to the elements in vector `b` (sixteen unsigned 8-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done unsigned with full precision,
+/// i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_u.b))]
+unsafe fn __msa_aver_u_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_aver_u_b(a, b)
+}
+
+/// Vector Unsigned Average Rounded
+///
+/// The elements in vector `a` (eight unsigned 16-bit integer numbers)
+/// are added to the elements in vector `b` (eight unsigned 16-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done unsigned with full precision,
+/// i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_u.h))]
+unsafe fn __msa_aver_u_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_aver_u_h(a, b)
+}
+
+/// Vector Unsigned Average Rounded
+///
+/// The elements in vector `a` (four unsigned 32-bit integer numbers)
+/// are added to the elements in vector `b` (four unsigned 32-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done unsigned with full precision,
+/// i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_u.w))]
+unsafe fn __msa_aver_u_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_aver_u_w(a, b)
+}
+
+/// Vector Unsigned Average Rounded
+///
+/// The elements in vector `a` (two unsigned 64-bit integer numbers)
+/// are added to the elements in vector `b` (two unsigned 64-bit integer numbers).
+/// The addition of the elements plus 1 (for rounding) is done unsigned with full precision,
+/// i.e. the result has one extra bit.
+/// Unsigned division by 2 (or logical shift right by one bit) is performed before
+/// writing the result to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(aver_u.d))]
+unsafe fn __msa_aver_u_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_aver_u_d(a, b)
+}
+
+/// Vector Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The bit position is given by the elements in `b` (sixteen unsigned 8-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclr.b))]
+unsafe fn __msa_bclr_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_bclr_b(a, b)
+}
+
+/// Vector Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (eight unsigned 16-bit integer numbers).
+/// The bit position is given by the elements in `b` (eight unsigned 16-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclr.h))]
+unsafe fn __msa_bclr_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_bclr_h(a, b)
+}
+
+/// Vector Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (four unsigned 32-bit integer numbers).
+/// The bit position is given by the elements in `b` (four unsigned 32-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclr.w))]
+unsafe fn __msa_bclr_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_bclr_w(a, b)
+}
+
+/// Vector Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (two unsigned 64-bit integer numbers).
+/// The bit position is given by the elements in `b` (two unsigned 64-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclr.d))]
+unsafe fn __msa_bclr_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_bclr_d(a, b)
+}
+
+/// Immediate Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The bit position is given by the immediate imm3 modulo the size of the element in bits.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclri.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bclri_b(a: u8x16, imm3: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_bclri_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (eight unsigned 16-bit integer numbers).
+/// The bit position is given by the immediate imm4 modulo the size of the element in bits.
+/// The result is written to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclri.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bclri_h(a: u16x8, imm4: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_bclri_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
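+
+// Illustrative scalar sketch (not part of this patch) of `bclr`/`bclri`:
+// clear the bit selected by `b` (or the immediate) modulo the element width:
+//
+//     fn bclr_b_lane(a: u8, b: u8) -> u8 {
+//         a & !(1u8 << (b % 8))
+//     }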
+
+/// Immediate Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (four unsigned 32-bit integer numbers).
+/// The bit position is given by the immediate imm5 modulo the size of the element in bits.
+/// The result is written to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclri.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bclri_w(a: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_bclri_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Bit Clear
+///
+/// Clear (set to 0) one bit in each element of vector `a` (two unsigned 64-bit integer numbers).
+/// The bit position is given by the immediate imm6 modulo the size of the element in bits.
+/// The result is written to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bclri.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bclri_d(a: u64x2, imm6: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_bclri_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Vector Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (sixteen unsigned 8-bit integer numbers)
+/// to elements in vector `a` (sixteen unsigned 8-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the elements in vector `c` (sixteen unsigned 8-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsl.b))]
+unsafe fn __msa_binsl_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16 {
+    msa_binsl_b(a, b, c)
+}
+
+/// Vector Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (eight unsigned 16-bit integer numbers)
+/// to elements in vector `a` (eight unsigned 16-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the elements in vector `c` (eight unsigned 16-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsl.h))]
+unsafe fn __msa_binsl_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8 {
+    msa_binsl_h(a, b, c)
+}
+
+/// Vector Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (four unsigned 32-bit integer numbers)
+/// to elements in vector `a` (four unsigned 32-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the elements in vector `c` (four unsigned 32-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsl.w))]
+unsafe fn __msa_binsl_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4 {
+    msa_binsl_w(a, b, c)
+}
+
+/// Vector Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (two unsigned 64-bit integer numbers)
+/// to elements in vector `a` (two unsigned 64-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the elements in vector `c` (two unsigned 64-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsl.d))]
+unsafe fn __msa_binsl_d(a: u64x2, b: u64x2, c: u64x2) -> u64x2 {
+    msa_binsl_d(a, b, c)
+}
+
+/// Immediate Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (sixteen unsigned 8-bit integer numbers)
+/// to elements in vector `a` (sixteen unsigned 8-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the immediate imm3 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsli_b(a: u8x16, b: u8x16, imm3: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_binsli_b(a, b, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (eight unsigned 16-bit integer numbers)
+/// to elements in vector `a` (eight unsigned 16-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the immediate imm4 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsli_h(a: u16x8, b: u16x8, imm4: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_binsli_h(a, b, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (four unsigned 32-bit integer numbers)
+/// to elements in vector `a` (four unsigned 32-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the immediate imm5 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsli_w(a: u32x4, b: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_binsli_w(a, b, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Bit Insert Left
+///
+/// Copy most significant (left) bits in each element of vector `b` (two unsigned 64-bit integer numbers)
+/// to elements in vector `a` (two unsigned 64-bit integer numbers) while preserving the least significant (right) bits.
+/// The number of bits to copy is given by the immediate imm6 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsli_d(a: u64x2, b: u64x2, imm6: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_binsli_d(a, b, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
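+
+// Illustrative scalar sketch (not part of this patch) of `binsl`/`binsli`:
+// copy the top `(c % 8) + 1` bits of `b` into `a`, keeping the remaining low
+// bits of `a`:
+//
+//     fn binsl_b_lane(a: u8, b: u8, c: u8) -> u8 {
+//         let n = (c % 8) + 1;                       // copy the n most significant bits
+//         let mask = (0xFFu8 >> (8 - n)) << (8 - n); // e.g. n = 3 -> 0b1110_0000
+//         (a & !mask) | (b & mask)
+//     }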
+
+/// Vector Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (sixteen unsigned 8-bit integer numbers)
+/// to elements in vector `a` (sixteen unsigned 8-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the elements in vector `c` (sixteen unsigned 8-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsr.b))]
+unsafe fn __msa_binsr_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16 {
+    msa_binsr_b(a, b, c)
+}
+
+/// Vector Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (eight unsigned 16-bit integer numbers)
+/// to elements in vector `a` (eight unsigned 16-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the elements in vector `c` (eight unsigned 16-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsr.h))]
+unsafe fn __msa_binsr_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8 {
+    msa_binsr_h(a, b, c)
+}
+
+/// Vector Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (four unsigned 32-bit integer numbers)
+/// to elements in vector `a` (four unsigned 32-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the elements in vector `c` (four unsigned 32-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsr.w))]
+unsafe fn __msa_binsr_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4 {
+    msa_binsr_w(a, b, c)
+}
+
+/// Vector Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (two unsigned 64-bit integer numbers)
+/// to elements in vector `a` (two unsigned 64-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the elements in vector `c` (two unsigned 64-bit integer numbers)
+/// modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsr.d))]
+unsafe fn __msa_binsr_d(a: u64x2, b: u64x2, c: u64x2) -> u64x2 {
+    msa_binsr_d(a, b, c)
+}
+
+/// Immediate Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (sixteen unsigned 8-bit integer numbers)
+/// to elements in vector `a` (sixteen unsigned 8-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the immediate imm3 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsri_b(a: u8x16, b: u8x16, imm3: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_binsri_b(a, b, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (eight unsigned 16-bit integer numbers)
+/// to elements in vector `a` (eight unsigned 16-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the immediate imm4 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsri_h(a: u16x8, b: u16x8, imm4: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_binsri_h(a, b, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (four unsigned 32-bit integer numbers)
+/// to elements in vector `a` (four unsigned 32-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the immediate imm5 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsri_w(a: u32x4, b: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_binsri_w(a, b, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Bit Insert Right
+///
+/// Copy least significant (right) bits in each element of vector `b` (two unsigned 64-bit integer numbers)
+/// to elements in vector `a` (two unsigned 64-bit integer numbers) while preserving the most significant (left) bits.
+/// The number of bits to copy is given by the immediate imm6 modulo the size of the element in bits plus 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_binsri_d(a: u64x2, b: u64x2, imm6: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_binsri_d(a, b, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Vector Bit Move If Not Zero
+///
+/// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector
+/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from target vector `c`
+/// (sixteen unsigned 8-bit integer numbers) are 1 and leave unchanged all destination bits
+/// for which the corresponding target bits are 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bmnz.v))]
+unsafe fn __msa_bmnz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 {
+    msa_bmnz_v(a, b, c)
+}
+
+/// Immediate Bit Move If Not Zero
+///
+/// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector
+/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from the immediate imm8
+/// are 1 and leave unchanged all destination bits for which the corresponding immediate bits are 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_bmnzi_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_bmnzi_b(a, b, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// Vector Bit Move If Zero
+///
+/// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector
+/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from target vector `c`
+/// (sixteen unsigned 8-bit integer numbers) are 0 and leave unchanged all destination bits
+/// for which the corresponding target bits are 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bmz.v))]
+unsafe fn __msa_bmz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 {
+    msa_bmz_v(a, b, c)
+}
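+
+// Illustrative scalar sketch (not part of this patch) of the bitwise moves:
+// `bmnz` takes bits of `b` where the mask `c` is 1 and keeps `a` where it is 0;
+// `bmz` is the same with the mask inverted:
+//
+//     fn bmnz_v_lane(a: u8, b: u8, c: u8) -> u8 {
+//         (a & !c) | (b & c)
+//     }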
+
+/// Immediate Bit Move If Zero
+///
+/// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector
+/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from the immediate imm8
+/// are 0 and leave unchanged all destination bits for which the corresponding immediate bits are 1.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bmzi.b, imm8 = 0b11111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_bmzi_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_bmzi_b(a, b, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// Vector Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (sixteen unsigned 8-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bneg.b))]
+unsafe fn __msa_bneg_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_bneg_b(a, b)
+}
+
+/// Vector Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (eight unsigned 16-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (eight unsigned 16-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bneg.h))]
+unsafe fn __msa_bneg_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_bneg_h(a, b)
+}
+
+/// Vector Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (four unsigned 32-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (four unsigned 32-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bneg.w))]
+unsafe fn __msa_bneg_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_bneg_w(a, b)
+}
+
+/// Vector Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (two unsigned 64-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (two unsigned 64-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bneg.d))]
+unsafe fn __msa_bneg_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_bneg_d(a, b)
+}
+
+/// Immediate Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The bit position is given by immediate imm3 modulo the size of the element in bits.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnegi.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bnegi_b(a: u8x16, imm3: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_bnegi_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (eight unsigned 16-bit integer numbers).
+/// The bit position is given by immediate imm4 modulo the size of the element in bits.
+/// The result is written to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnegi.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bnegi_h(a: u16x8, imm4: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_bnegi_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (four unsigned 32-bit integer numbers).
+/// The bit position is given by immediate imm5 modulo the size of the element in bits.
+/// The result is written to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnegi.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bnegi_w(a: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_bnegi_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Bit Negate
+///
+/// Negate (complement) one bit in each element of vector `a` (two unsigned 64-bit integer numbers).
+/// The bit position is given by immediate imm6 modulo the size of the element in bits.
+/// The result is written to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnegi.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bnegi_d(a: u64x2, imm6: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_bnegi_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Immediate Branch If All Elements Are Not Zero
+///
+/// PC-relative branch if all elements in `a` (sixteen unsigned 8-bit integer numbers) are not zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnz.b))]
+unsafe fn __msa_bnz_b(a: u8x16) -> i32 {
+    msa_bnz_b(a)
+}
+
+/// Immediate Branch If All Elements Are Not Zero
+///
+/// PC-relative branch if all elements in `a` (eight unsigned 16-bit integer numbers) are not zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnz.h))]
+unsafe fn __msa_bnz_h(a: u16x8) -> i32 {
+    msa_bnz_h(a)
+}
+
+/// Immediate Branch If All Elements Are Not Zero
+///
+/// PC-relative branch if all elements in `a` (four unsigned 32-bit integer numbers) are not zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnz.w))]
+unsafe fn __msa_bnz_w(a: u32x4) -> i32 {
+    msa_bnz_w(a)
+}
+
+/// Immediate Branch If All Elements Are Not Zero
+///
+/// PC-relative branch if all elements in `a` (two unsigned 64-bit integer numbers) are not zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnz.d))]
+unsafe fn __msa_bnz_d(a: u64x2) -> i32 {
+    msa_bnz_d(a)
+}
+
+/// Immediate Branch If Not Zero (At Least One Element of Any Format Is Not Zero)
+///
+/// PC-relative branch if at least one bit in `a` (sixteen unsigned 8-bit integer numbers) is not zero,
+/// i.e. at least one element is not zero regardless of the data format.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bnz.v))]
+unsafe fn __msa_bnz_v(a: u8x16) -> i32 {
+    msa_bnz_v(a)
+}
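+
+// Illustrative usage sketch (hypothetical caller, not part of this patch): the
+// branch intrinsics return an i32 flag that ordinary Rust code can test, e.g.
+// to guard a SIMD fast path:
+//
+//     unsafe fn all_lanes_nonzero(v: u8x16) -> bool {
+//         __msa_bnz_b(v) != 0
+//     }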
+
+/// Vector Bit Select
+///
+/// Selectively copy bits from the source vectors `b` (sixteen unsigned 8-bit integer numbers)
+/// and `c` (sixteen unsigned 8-bit integer numbers)
+/// into destination vector `a` (sixteen unsigned 8-bit integer numbers) based on the corresponding bit in `a`:
+/// if 0 copies the bit from `b`, if 1 copies the bit from `c`.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bsel.v))]
+unsafe fn __msa_bsel_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 {
+    msa_bsel_v(a, b, c)
+}
+
+/// Immediate Bit Select
+///
+/// Selectively copy bits from vector `b` (sixteen unsigned 8-bit integer numbers)
+/// and the 8-bit immediate imm8
+/// into destination vector `a` (sixteen unsigned 8-bit integer numbers) based on the corresponding bit in `a`:
+/// if 0 copies the bit from `b`, if 1 copies the bit from imm8.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_bseli_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_bseli_b(a, b, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// Vector Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (sixteen unsigned 8-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bset.b))]
+unsafe fn __msa_bset_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_bset_b(a, b)
+}
+
+/// Vector Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (eight unsigned 16-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (eight unsigned 16-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bset.h))]
+unsafe fn __msa_bset_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_bset_h(a, b)
+}
+
+/// Vector Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (four unsigned 32-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (four unsigned 32-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bset.w))]
+unsafe fn __msa_bset_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_bset_w(a, b)
+}
+
+/// Vector Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (two unsigned 64-bit integer numbers).
+/// The bit position is given by the elements in vector `b` (two unsigned 64-bit integer numbers)
+/// modulo the size of the element in bits.
+/// The result is written to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bset.d))]
+unsafe fn __msa_bset_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_bset_d(a, b)
+}
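+
+// Illustrative scalar sketch (not part of this patch) of `bset`/`bseti`:
+// set the bit selected by `b` (or the immediate) modulo the element width:
+//
+//     fn bset_b_lane(a: u8, b: u8) -> u8 {
+//         a | (1u8 << (b % 8))
+//     }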
+
+/// Immediate Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The bit position is given by immediate imm3.
+/// The result is written to vector `a` (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bseti.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bseti_b(a: u8x16, imm3: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_bseti_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (eight unsigned 16-bit integer numbers).
+/// The bit position is given by immediate imm4.
+/// The result is written to vector `a` (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bseti.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bseti_h(a: u16x8, imm4: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_bseti_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (four unsigned 32-bit integer numbers).
+/// The bit position is given by immediate imm5.
+/// The result is written to vector `a` (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bseti.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bseti_w(a: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_bseti_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Bit Set
+///
+/// Set to 1 one bit in each element of vector `a` (two unsigned 64-bit integer numbers).
+/// The bit position is given by immediate imm6.
+/// The result is written to vector `a` (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bseti.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_bseti_d(a: u64x2, imm6: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_bseti_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Immediate Branch If At Least One Element Is Zero
+///
+/// PC-relative branch if at least one element in `a` (sixteen unsigned 8-bit integer numbers) is zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bz.b))]
+unsafe fn __msa_bz_b(a: u8x16) -> i32 {
+    msa_bz_b(a)
+}
+
+/// Immediate Branch If At Least One Element Is Zero
+///
+/// PC-relative branch if at least one element in `a` (eight unsigned 16-bit integer numbers) is zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bz.h))]
+unsafe fn __msa_bz_h(a: u16x8) -> i32 {
+    msa_bz_h(a)
+}
+
+/// Immediate Branch If At Least One Element Is Zero
+///
+/// PC-relative branch if at least one element in `a` (four unsigned 32-bit integer numbers) is zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bz.w))]
+unsafe fn __msa_bz_w(a: u32x4) -> i32 {
+    msa_bz_w(a)
+}
+
+/// Immediate Branch If At Least One Element Is Zero
+///
+/// PC-relative branch if at least one element in `a` (two unsigned 64-bit integer numbers) is zero.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bz.d))]
+unsafe fn __msa_bz_d(a: u64x2) -> i32 {
+    msa_bz_d(a)
+}
+
+/// Immediate Branch If Zero (All Elements of Any Format Are Zero)
+///
+/// PC-relative branch if all bits in `a` (sixteen unsigned 8-bit integer numbers) are zero,
+/// i.e. all elements are zero regardless of the data format.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(bz.v))]
+unsafe fn __msa_bz_v(a: u8x16) -> i32 {
+    msa_bz_v(a)
+}
+
+/// Vector Compare Equal
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding `a` (sixteen signed 8-bit integer numbers) and `b` (sixteen signed 8-bit integer numbers)
+/// elements are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceq.b))]
+unsafe fn __msa_ceq_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_ceq_b(a, b)
+}
+
+/// Vector Compare Equal
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding `a` (eight signed 16-bit integer numbers) and `b` (eight signed 16-bit integer numbers)
+/// elements are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceq.h))]
+unsafe fn __msa_ceq_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_ceq_h(a, b)
+}
+
+/// Vector Compare Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding `a` (four signed 32-bit integer numbers) and `b` (four signed 32-bit integer numbers)
+/// elements are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceq.w))]
+unsafe fn __msa_ceq_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_ceq_w(a, b)
+}
+
+/// Vector Compare Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding `a` (two signed 64-bit integer numbers) and `b` (two signed 64-bit integer numbers)
+/// elements are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceq.d))]
+unsafe fn __msa_ceq_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_ceq_d(a, b)
+}
+
+/// Immediate Compare Equal
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding `a` (sixteen signed 8-bit integer numbers) element and the 5-bit signed immediate imm_s5
+/// are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceqi.b, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_ceqi_b(a: i8x16, imm_s5: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_ceqi_b(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Equal
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding `a` (eight signed 16-bit integer numbers) element and the 5-bit signed immediate imm_s5
+/// are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceqi.h, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_ceqi_h(a: i16x8, imm_s5: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_ceqi_h(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
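+
+// Illustrative scalar sketch (not part of this patch) of the compare family:
+// each lane becomes an all-ones mask (-1) when the predicate holds, else 0:
+//
+//     fn ceq_b_lane(a: i8, b: i8) -> i8 {
+//         if a == b { -1 } else { 0 }
+//     }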
+
+/// Immediate Compare Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding `a` (four signed 32-bit integer numbers) element and the 5-bit signed immediate imm_s5
+/// are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceqi.w, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_ceqi_w(a: i32x4, imm_s5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_ceqi_w(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding `a` (two signed 64-bit integer numbers) element and the 5-bit signed immediate imm_s5
+/// are equal, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ceqi.d, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_ceqi_d(a: i64x2, imm_s5: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_ceqi_d(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// GPR Copy from MSA Control Register
+///
+/// The sign extended content of MSA control register cs is copied to GPR rd.
+///
+/// Cannot be tested in user mode.
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cfcmsa, imm5 = 0b11111))]
+#[rustc_args_required_const(0)]
+unsafe fn __msa_cfcmsa(imm5: i32) -> i32 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_cfcmsa($imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Vector Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding `a` (sixteen signed 8-bit integer numbers) element
+/// is signed less than or equal to the `b` (sixteen signed 8-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_s.b))]
+unsafe fn __msa_cle_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_cle_s_b(a, b)
+}
+
+/// Vector Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding `a` (eight signed 16-bit integer numbers) element
+/// is signed less than or equal to the `b` (eight signed 16-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_s.h))]
+unsafe fn __msa_cle_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_cle_s_h(a, b)
+}
+
+/// Vector Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding `a` (four signed 32-bit integer numbers) element
+/// is signed less than or equal to the `b` (four signed 32-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_s.w))]
+unsafe fn __msa_cle_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_cle_s_w(a, b)
+}
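+
+// Illustrative scalar sketch (not part of this patch) of the ordered compares
+// (`cle`/`clt`, signed and unsigned), using the same all-ones/zero mask
+// convention as `ceq`:
+//
+//     fn cle_s_b_lane(a: i8, b: i8) -> i8 {
+//         if a <= b { -1 } else { 0 }
+//     }
+//
+//     fn clt_u_b_lane(a: u8, b: u8) -> i8 {
+//         if a < b { -1 } else { 0 }
+//     }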
+
+/// Vector Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding `a` (two signed 64-bit integer numbers) element
+/// is signed less than or equal to the `b` (two signed 64-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_s.d))]
+unsafe fn __msa_cle_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_cle_s_d(a, b)
+}
+
+/// Vector Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding `a` (sixteen unsigned 8-bit integer numbers) element
+/// is unsigned less than or equal to the `b` (sixteen unsigned 8-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_u.b))]
+unsafe fn __msa_cle_u_b(a: u8x16, b: u8x16) -> i8x16 {
+    msa_cle_u_b(a, b)
+}
+
+/// Vector Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding `a` (eight unsigned 16-bit integer numbers) element
+/// is unsigned less than or equal to the `b` (eight unsigned 16-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_u.h))]
+unsafe fn __msa_cle_u_h(a: u16x8, b: u16x8) -> i16x8 {
+    msa_cle_u_h(a, b)
+}
+
+/// Vector Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding `a` (four unsigned 32-bit integer numbers) element
+/// is unsigned less than or equal to the `b` (four unsigned 32-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_u.w))]
+unsafe fn __msa_cle_u_w(a: u32x4, b: u32x4) -> i32x4 {
+    msa_cle_u_w(a, b)
+}
+
+/// Vector Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding `a` (two unsigned 64-bit integer numbers) element
+/// is unsigned less than or equal to the `b` (two unsigned 64-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(cle_u.d))]
+unsafe fn __msa_cle_u_d(a: u64x2, b: u64x2) -> i64x2 {
+    msa_cle_u_d(a, b)
+}
+
+/// Immediate Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding `a` (sixteen signed 8-bit integer numbers) element
+/// is less than or equal to the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_s.b, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_s_b(a: i8x16, imm_s5: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clei_s_b(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding `a` (eight signed 16-bit integer numbers) element
+/// is less than or equal to the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_s.h, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_s_h(a: i16x8, imm_s5: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clei_s_h(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding `a` (four signed 32-bit integer numbers) element
+/// is less than or equal to the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_s.w, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_s_w(a: i32x4, imm_s5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clei_s_w(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Signed Less Than or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding `a` (two signed 64-bit integer numbers) element
+/// is less than or equal to the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_s.d, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_s_d(a: i64x2, imm_s5: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clei_s_d(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding `a` (sixteen unsigned 8-bit integer numbers) element
+/// is unsigned less than or equal to the 5-bit unsigned immediate imm5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_u.b, imm5 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_u_b(a: u8x16, imm5: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_clei_u_b(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding `a` (eight unsigned 16-bit integer numbers) element
+/// is unsigned less than or equal to the 5-bit unsigned immediate imm5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_u.h, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_u_h(a: u16x8, imm5: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_clei_u_h(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
call {
+        ($imm5:expr) => {
+            msa_clei_u_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Compare Unsigned Less Than or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding 'a' (two unsigned 64-bit integer numbers) element
+/// is unsigned less than or equal to the 5-bit unsigned immediate imm5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clei_u.d, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clei_u_d(a: u64x2, imm5: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_clei_u_d(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Vector Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding 'a' (sixteen signed 8-bit integer numbers) element
+/// is signed less than the 'b' (sixteen signed 8-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_s.b))]
+unsafe fn __msa_clt_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_clt_s_b(a, b)
+}
+
+/// Vector Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding 'a' (eight signed 16-bit integer numbers) element
+/// is signed less than the 'b' (eight signed 16-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_s.h))]
+unsafe fn __msa_clt_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_clt_s_h(a, b)
+}
+
+/// Vector Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding 'a' (four signed 32-bit integer numbers) element
+/// is signed less than the 'b' (four signed 32-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_s.w))]
+unsafe fn __msa_clt_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_clt_s_w(a, b)
+}
+
+/// Vector Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding 'a' (two signed 64-bit integer numbers) element
+/// is signed less than the 'b' (two signed 64-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_s.d))]
+unsafe fn __msa_clt_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_clt_s_d(a, b)
+}
+
+/// Vector Compare Unsigned Less Than
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding 'a' (sixteen unsigned 8-bit integer numbers) element
+/// is unsigned less than the 'b' (sixteen unsigned 8-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_u.b))]
+unsafe fn __msa_clt_u_b(a: u8x16, b: u8x16) -> i8x16 {
+    msa_clt_u_b(a, b)
+}
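+
+// Illustrative scalar model of the compare semantics above (a sketch for
+// exposition only, not part of the MSA API): each result lane is an
+// all-ones mask (-1) when the predicate holds and 0 otherwise, so compare
+// results can be used directly as masks for bitwise blends.
+#[cfg(test)]
+fn scalar_clt_u_b_model(a: [u8; 16], b: [u8; 16]) -> [i8; 16] {
+    let mut r = [0i8; 16];
+    for i in 0..16 {
+        // Unsigned less-than per lane; -1 is all bits set in two's complement.
+        r[i] = if a[i] < b[i] { -1 } else { 0 };
+    }
+    r
+}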
+
+/// Vector Compare Unsigned Less Than
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding 'a' (eight unsigned 16-bit integer numbers) element
+/// is unsigned less than the 'b' (eight unsigned 16-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_u.h))]
+unsafe fn __msa_clt_u_h(a: u16x8, b: u16x8) -> i16x8 {
+    msa_clt_u_h(a, b)
+}
+
+/// Vector Compare Unsigned Less Than
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding 'a' (four unsigned 32-bit integer numbers) element
+/// is unsigned less than the 'b' (four unsigned 32-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_u.w))]
+unsafe fn __msa_clt_u_w(a: u32x4, b: u32x4) -> i32x4 {
+    msa_clt_u_w(a, b)
+}
+
+/// Vector Compare Unsigned Less Than
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding 'a' (two unsigned 64-bit integer numbers) element
+/// is unsigned less than the 'b' (two unsigned 64-bit integer numbers) element,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clt_u.d))]
+unsafe fn __msa_clt_u_d(a: u64x2, b: u64x2) -> i64x2 {
+    msa_clt_u_d(a, b)
+}
+
+/// Immediate Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements
+/// if the corresponding 'a' (sixteen signed 8-bit integer numbers) element
+/// is less than the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clti_s.b, imm_s5 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clti_s_b(a: i8x16, imm_s5: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clti_s_b(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements
+/// if the corresponding 'a' (eight signed 16-bit integer numbers) element
+/// is less than the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clti_s.h, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clti_s_h(a: i16x8, imm_s5: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clti_s_h(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements
+/// if the corresponding 'a' (four signed 32-bit integer numbers) element
+/// is less than the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clti_s.w, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clti_s_w(a: i32x4, imm_s5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm_s5:expr) => {
+            msa_clti_s_w(a, $imm_s5)
+        };
+    }
+    constify_imm_s5!(imm_s5, call)
+}
+
+/// Immediate Compare Signed Less Than
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements
+/// if the corresponding 'a' (two signed 64-bit integer numbers) element
+/// is less than the 5-bit signed immediate imm_s5,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(clti_s.d, imm_s5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_clti_s_d(a: i64x2, imm_s5: i32) -> i64x2 {
+    macro_rules!
call { + ($imm_s5:expr) => { + msa_clti_s_d(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Compare Unsigned Less Than +/// +/// Set all bits to 1 in vector (sixteen signed 8-bit integer numbers) elements +/// if the corresponding 'a' (sixteen unsigned 8-bit integer numbers) element +/// is unsigned less than the 5-bit unsigned immediate imm5, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(clti_u.b, imm5 = 0b111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_clti_u_b(a: u8x16, imm5: i32) -> i8x16 { + macro_rules! call { + ($imm5:expr) => { + msa_clti_u_b(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Immediate Compare Unsigned Less Than +/// +/// Set all bits to 1 in vector (eight signed 16-bit integer numbers) elements +/// if the corresponding 'a' (eight unsigned 16-bit integer numbers) element +/// is unsigned less than the 5-bit unsigned immediate imm5, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(clti_u.h, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_clti_u_h(a: u16x8, imm5: i32) -> i16x8 { + macro_rules! call { + ($imm5:expr) => { + msa_clti_u_h(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Immediate Compare Unsigned Less Than +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four unsigned 32-bit integer numbers) element +/// is unsigned less than the 5-bit unsigned immediate imm5, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(clti_u.w, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_clti_u_w(a: u32x4, imm5: i32) -> i32x4 { + macro_rules! call { + ($imm5:expr) => { + msa_clti_u_w(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Immediate Compare Unsigned Less Than +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two unsigned 64-bit integer numbers) element +/// is unsigned less than the 5-bit unsigned immediate imm5, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(clti_u.d, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_clti_u_d(a: u64x2, imm5: i32) -> i64x2 { + macro_rules! call { + ($imm5:expr) => { + msa_clti_u_d(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Element Copy to GPR Signed +/// +/// Sign-extend element imm4 of vector 'a' (sixteen signed 8-bit integer numbers) +/// and copy the result to GPR rd +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(copy_s.b, imm4 = 0b1111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_copy_s_b(a: i8x16, imm4: i32) -> i32 { + macro_rules! call { + ($imm4:expr) => { + msa_copy_s_b(a, $imm4) + }; + } + constify_imm4!(imm4, call) +} + +/// Element Copy to GPR Signed +/// +/// Sign-extend element imm3 of vector 'a' (eight signed 16-bit integer numbers) +/// and copy the result to GPR rd +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(copy_s.h, imm3 = 0b111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_copy_s_h(a: i16x8, imm3: i32) -> i32 { + macro_rules! 
call {
+        ($imm3:expr) => {
+            msa_copy_s_h(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Element Copy to GPR Signed
+///
+/// Sign-extend element imm2 of vector 'a' (four signed 32-bit integer numbers)
+/// and copy the result to GPR rd.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(copy_s.w, imm2 = 0b11))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_copy_s_w(a: i32x4, imm2: i32) -> i32 {
+    macro_rules! call {
+        ($imm2:expr) => {
+            msa_copy_s_w(a, $imm2)
+        };
+    }
+    constify_imm2!(imm2, call)
+}
+
+/// Element Copy to GPR Signed
+///
+/// Sign-extend element imm1 of vector 'a' (two signed 64-bit integer numbers)
+/// and copy the result to GPR rd.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(copy_s.d, imm1 = 0b1))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_copy_s_d(a: i64x2, imm1: i32) -> i64 {
+    macro_rules! call {
+        ($imm1:expr) => {
+            msa_copy_s_d(a, $imm1)
+        };
+    }
+    constify_imm1!(imm1, call)
+}
+
+/// Element Copy to GPR Unsigned
+///
+/// Zero-extend element imm4 of vector 'a' (sixteen signed 8-bit integer numbers)
+/// and copy the result to GPR rd.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(copy_u.b, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_copy_u_b(a: i8x16, imm4: i32) -> u32 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_copy_u_b(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Element Copy to GPR Unsigned
+///
+/// Zero-extend element imm3 of vector 'a' (eight signed 16-bit integer numbers)
+/// and copy the result to GPR rd.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(copy_u.h, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_copy_u_h(a: i16x8, imm3: i32) -> u32 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_copy_u_h(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Element Copy to GPR Unsigned
+///
+/// Zero-extend element imm2 of vector 'a' (four signed 32-bit integer numbers)
+/// and copy the result to GPR rd.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(copy_u.w, imm2 = 0b11))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_copy_u_w(a: i32x4, imm2: i32) -> u32 {
+    macro_rules! call {
+        ($imm2:expr) => {
+            msa_copy_u_w(a, $imm2)
+        };
+    }
+    constify_imm2!(imm2, call)
+}
+
+/// Element Copy to GPR Unsigned
+///
+/// Zero-extend element imm1 of vector 'a' (two signed 64-bit integer numbers)
+/// and copy the result to GPR rd.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(copy_u.d, imm1 = 0b1))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_copy_u_d(a: i64x2, imm1: i32) -> u64 {
+    macro_rules! call {
+        ($imm1:expr) => {
+            msa_copy_u_d(a, $imm1)
+        };
+    }
+    constify_imm1!(imm1, call)
+}
+
+/// GPR Copy to MSA Control Register
+///
+/// The content of the least significant 31 bits of GPR 'a' is copied to
+/// the MSA control register selected by the immediate imm5.
+/// Cannot be tested in user mode.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ctcmsa, imm5 = 0b1))]
+#[rustc_args_required_const(0)]
+unsafe fn __msa_ctcmsa(imm5: i32, a: i32) -> () {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_ctcmsa($imm5, a)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Vector Signed Divide
+///
+/// The signed integer elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are divided by signed integer elements in vector 'b' (sixteen signed 8-bit integer numbers).
+/// The result is written to vector (sixteen signed 8-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_s.b))] +unsafe fn __msa_div_s_b(a: i8x16, b: i8x16) -> i8x16 { + msa_div_s_b(a, b) +} + +/// Vector Signed Divide +/// +/// The signed integer elements in vector 'a' (eight signed 16-bit integer numbers) +/// are divided by signed integer elements in vector 'b' (eight signed 16-bit integer numbers). +/// The result is written to vector (eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_s.h))] +unsafe fn __msa_div_s_h(a: i16x8, b: i16x8) -> i16x8 { + msa_div_s_h(a, b) +} + +/// Vector Signed Divide +/// +/// The signed integer elements in vector 'a' (four signed 32-bit integer numbers) +/// are divided by signed integer elements in vector 'b' (four signed 32-bit integer numbers). +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_s.w))] +unsafe fn __msa_div_s_w(a: i32x4, b: i32x4) -> i32x4 { + msa_div_s_w(a, b) +} + +/// Vector Signed Divide +/// +/// The signed integer elements in vector 'a' (two signed 64-bit integer numbers) +/// are divided by signed integer elements in vector 'b' (two signed 64-bit integer numbers). +/// The result is written to vector (two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_s.d))] +unsafe fn __msa_div_s_d(a: i64x2, b: i64x2) -> i64x2 { + msa_div_s_d(a, b) +} + +/// Vector Unsigned Divide +/// +/// The unsigned integer elements in vector 'a' (sixteen unsigned 8-bit integer numbers) +/// are divided by unsigned integer elements in vector 'b' (sixteen unsigned 8-bit integer numbers). +/// The result is written to vector (sixteen unsigned 8-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_u.b))] +unsafe fn __msa_div_u_b(a: u8x16, b: u8x16) -> u8x16 { + msa_div_u_b(a, b) +} + +/// Vector Unsigned Divide +/// +/// The unsigned integer elements in vector 'a' (eight unsigned 16-bit integer numbers) +/// are divided by unsigned integer elements in vector 'b' (eight unsigned 16-bit integer numbers). +/// The result is written to vector (eight unsigned 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_u.h))] +unsafe fn __msa_div_u_h(a: u16x8, b: u16x8) -> u16x8 { + msa_div_u_h(a, b) +} + +/// Vector Unsigned Divide +/// +/// The unsigned integer elements in vector 'a' (four unsigned 32-bit integer numbers) +/// are divided by unsigned integer elements in vector 'b' (four unsigned 32-bit integer numbers). +/// The result is written to vector (four unsigned 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(div_u.w))] +unsafe fn __msa_div_u_w(a: u32x4, b: u32x4) -> u32x4 { + msa_div_u_w(a, b) +} + +/// Vector Unsigned Divide +/// +/// The unsigned integer elements in vector 'a' (two unsigned 64-bit integer numbers) +/// are divided by unsigned integer elements in vector 'b' (two unsigned 64-bit integer numbers). +/// The result is written to vector (two unsigned 64-bit integer numbers). 
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(div_u.d))]
+unsafe fn __msa_div_u_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_div_u_d(a, b)
+}
+
+/// Vector Signed Dot Product
+///
+/// The signed integer elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'b' (sixteen signed 8-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results of
+/// adjacent odd/even elements are added and stored to the destination
+/// vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dotp_s.h))]
+unsafe fn __msa_dotp_s_h(a: i8x16, b: i8x16) -> i16x8 {
+    msa_dotp_s_h(a, b)
+}
+
+/// Vector Signed Dot Product
+///
+/// The signed integer elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'b' (eight signed 16-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results of
+/// adjacent odd/even elements are added and stored to the destination
+/// vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dotp_s.w))]
+unsafe fn __msa_dotp_s_w(a: i16x8, b: i16x8) -> i32x4 {
+    msa_dotp_s_w(a, b)
+}
+
+/// Vector Signed Dot Product
+///
+/// The signed integer elements in vector 'a' (four signed 32-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'b' (four signed 32-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results of
+/// adjacent odd/even elements are added and stored to the destination
+/// vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dotp_s.d))]
+unsafe fn __msa_dotp_s_d(a: i32x4, b: i32x4) -> i64x2 {
+    msa_dotp_s_d(a, b)
+}
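+
+// Illustrative scalar model of `dotp_s.h` (a sketch only, not part of the
+// MSA API): each pair of adjacent 8-bit lanes is sign-extended, multiplied,
+// and the two products are summed into one 16-bit result lane, i.e. the
+// result lanes are twice the width of the input lanes.
+#[cfg(test)]
+fn scalar_dotp_s_h_model(a: [i8; 16], b: [i8; 16]) -> [i16; 8] {
+    let mut r = [0i16; 8];
+    for i in 0..8 {
+        let even = (a[2 * i] as i16) * (b[2 * i] as i16);
+        let odd = (a[2 * i + 1] as i16) * (b[2 * i + 1] as i16);
+        // The result wraps modulo 2^16; no saturation is applied.
+        r[i] = even.wrapping_add(odd);
+    }
+    r
+}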
+
+/// Vector Unsigned Dot Product
+///
+/// The unsigned integer elements in vector 'a' (sixteen unsigned 8-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'b' (sixteen unsigned 8-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results of
+/// adjacent odd/even elements are added and stored to the destination
+/// vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dotp_u.h))]
+unsafe fn __msa_dotp_u_h(a: u8x16, b: u8x16) -> u16x8 {
+    msa_dotp_u_h(a, b)
+}
+
+/// Vector Unsigned Dot Product
+///
+/// The unsigned integer elements in vector 'a' (eight unsigned 16-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'b' (eight unsigned 16-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results of
+/// adjacent odd/even elements are added and stored to the destination
+/// vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dotp_u.w))]
+unsafe fn __msa_dotp_u_w(a: u16x8, b: u16x8) -> u32x4 {
+    msa_dotp_u_w(a, b)
+}
+
+/// Vector Unsigned Dot Product
+///
+/// The unsigned integer elements in vector 'a' (four unsigned 32-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'b' (four unsigned 32-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results of
+/// adjacent odd/even elements are added and stored to the destination
+/// vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dotp_u.d))]
+unsafe fn __msa_dotp_u_d(a: u32x4, b: u32x4) -> u64x2 {
+    msa_dotp_u_d(a, b)
+}
+
+/// Vector Signed Dot Product and Add
+///
+/// The signed integer elements in vector 'b' (sixteen signed 8-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'c' (sixteen signed 8-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are added to the vector 'a' (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpadd_s.h))]
+unsafe fn __msa_dpadd_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8 {
+    msa_dpadd_s_h(a, b, c)
+}
+
+/// Vector Signed Dot Product and Add
+///
+/// The signed integer elements in vector 'b' (eight signed 16-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'c' (eight signed 16-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are added to the vector 'a' (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpadd_s.w))]
+unsafe fn __msa_dpadd_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4 {
+    msa_dpadd_s_w(a, b, c)
+}
+
+/// Vector Signed Dot Product and Add
+///
+/// The signed integer elements in vector 'b' (four signed 32-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'c' (four signed 32-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are added to the vector 'a' (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpadd_s.d))]
+unsafe fn __msa_dpadd_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2 {
+    msa_dpadd_s_d(a, b, c)
+}
+
+/// Vector Unsigned Dot Product and Add
+///
+/// The unsigned integer elements in vector 'b' (sixteen unsigned 8-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'c' (sixteen unsigned 8-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are added to the vector 'a' (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpadd_u.h))]
+unsafe fn __msa_dpadd_u_h(a: u16x8, b: u8x16, c: u8x16) -> u16x8 {
+    msa_dpadd_u_h(a, b, c)
+}
+
+/// Vector Unsigned Dot Product and Add
+///
+/// The unsigned integer elements in vector 'b' (eight unsigned 16-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'c' (eight unsigned 16-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are added to the vector 'a' (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpadd_u.w))]
+unsafe fn __msa_dpadd_u_w(a: u32x4, b: u16x8, c: u16x8) -> u32x4 {
+    msa_dpadd_u_w(a, b, c)
+}
+
+/// Vector Unsigned Dot Product and Add
+///
+/// The unsigned integer elements in vector 'b' (four unsigned 32-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'c' (four unsigned 32-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are added to the vector 'a' (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpadd_u.d))]
+unsafe fn __msa_dpadd_u_d(a: u64x2, b: u32x4, c: u32x4) -> u64x2 {
+    msa_dpadd_u_d(a, b, c)
+}
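+
+// Illustrative scalar model of `dpadd_s.h` (a sketch only, not part of the
+// MSA API): the widening dot product of adjacent lanes of 'b' and 'c' is
+// accumulated into the corresponding 16-bit lane of 'a'.
+#[cfg(test)]
+fn scalar_dpadd_s_h_model(a: [i16; 8], b: [i8; 16], c: [i8; 16]) -> [i16; 8] {
+    let mut r = a;
+    for i in 0..8 {
+        let dot = ((b[2 * i] as i16) * (c[2 * i] as i16))
+            .wrapping_add((b[2 * i + 1] as i16) * (c[2 * i + 1] as i16));
+        r[i] = r[i].wrapping_add(dot);
+    }
+    r
+}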
+
+/// Vector Signed Dot Product and Subtract
+///
+/// The signed integer elements in vector 'b' (sixteen signed 8-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'c' (sixteen signed 8-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are subtracted from the integer elements in vector 'a'
+/// (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpsub_s.h))]
+unsafe fn __msa_dpsub_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8 {
+    msa_dpsub_s_h(a, b, c)
+}
+
+/// Vector Signed Dot Product and Subtract
+///
+/// The signed integer elements in vector 'b' (eight signed 16-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'c' (eight signed 16-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are subtracted from the integer elements in vector 'a'
+/// (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpsub_s.w))]
+unsafe fn __msa_dpsub_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4 {
+    msa_dpsub_s_w(a, b, c)
+}
+
+/// Vector Signed Dot Product and Subtract
+///
+/// The signed integer elements in vector 'b' (four signed 32-bit integer numbers)
+/// are multiplied by signed integer elements in vector 'c' (four signed 32-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are subtracted from the integer elements in vector 'a'
+/// (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpsub_s.d))]
+unsafe fn __msa_dpsub_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2 {
+    msa_dpsub_s_d(a, b, c)
+}
+
+/// Vector Unsigned Dot Product and Subtract
+///
+/// The unsigned integer elements in vector 'b' (sixteen unsigned 8-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'c' (sixteen unsigned 8-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are subtracted from the integer elements in vector 'a'
+/// (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpsub_u.h))]
+unsafe fn __msa_dpsub_u_h(a: i16x8, b: u8x16, c: u8x16) -> i16x8 {
+    msa_dpsub_u_h(a, b, c)
+}
+
+/// Vector Unsigned Dot Product and Subtract
+///
+/// The unsigned integer elements in vector 'b' (eight unsigned 16-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'c' (eight unsigned 16-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are subtracted from the integer elements in vector 'a'
+/// (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpsub_u.w))]
+unsafe fn __msa_dpsub_u_w(a: i32x4, b: u16x8, c: u16x8) -> i32x4 {
+    msa_dpsub_u_w(a, b, c)
+}
+
+/// Vector Unsigned Dot Product and Subtract
+///
+/// The unsigned integer elements in vector 'b' (four unsigned 32-bit integer numbers)
+/// are multiplied by unsigned integer elements in vector 'c' (four unsigned 32-bit integer numbers),
+/// producing a result twice the size of the input operands. The multiplication results
+/// of adjacent odd/even elements are subtracted from the integer elements in vector 'a'
+/// (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(dpsub_u.d))]
+unsafe fn __msa_dpsub_u_d(a: i64x2, b: u32x4, c: u32x4) -> i64x2 {
+    msa_dpsub_u_d(a, b, c)
+}
+
+/// Vector Floating-Point Addition
+///
+/// The floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// are added to the floating-point elements in vector 'b' (four 32-bit floating point numbers).
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fadd.w))]
+unsafe fn __msa_fadd_w(a: f32x4, b: f32x4) -> f32x4 {
+    msa_fadd_w(a, b)
+}
+
+/// Vector Floating-Point Addition
+///
+/// The floating-point elements in vector 'a' (two 64-bit floating point numbers)
+/// are added to the floating-point elements in vector 'b' (two 64-bit floating point numbers).
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fadd.d))]
+unsafe fn __msa_fadd_d(a: f64x2, b: f64x2) -> f64x2 {
+    msa_fadd_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Always False
+///
+/// Set all bits to 0 in vector (four signed 32-bit integer numbers) elements.
+/// Signaling NaN elements in 'a' (four 32-bit floating point numbers)
+/// or 'b' (four 32-bit floating point numbers) signal Invalid Operation exception.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcaf.w))]
+unsafe fn __msa_fcaf_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcaf_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Always False
+///
+/// Set all bits to 0 in vector (two signed 64-bit integer numbers) elements.
+/// Signaling NaN elements in 'a' (two 64-bit floating point numbers)
+/// or 'b' (two 64-bit floating point numbers) signal Invalid Operation exception.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcaf.d))]
+unsafe fn __msa_fcaf_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcaf_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers)
+/// and 'b' (four 32-bit floating point numbers) elements are ordered and equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fceq.w))]
+unsafe fn __msa_fceq_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fceq_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers)
+/// and 'b' (two 64-bit floating point numbers) elements are ordered and equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fceq.d))]
+unsafe fn __msa_fceq_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fceq_d(a, b)
+}
+
+/// Vector Floating-Point Class Mask
+///
+/// Store in each element of vector (four signed 32-bit integer numbers)
+/// a bit mask reflecting the floating-point class of the corresponding element of vector
+/// 'a' (four 32-bit floating point numbers).
+/// The mask has 10 bits as follows. Bits 0 and 1 indicate NaN values: signaling NaN (bit 0) and quiet NaN (bit 1).
+/// Bits 2, 3, 4, 5 classify negative values: infinity (bit 2), normal (bit 3), subnormal (bit 4), and zero (bit 5).
+/// Bits 6, 7, 8, 9 classify positive values: infinity (bit 6), normal (bit 7), subnormal (bit 8), and zero (bit 9).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fclass.w))]
+unsafe fn __msa_fclass_w(a: f32x4) -> i32x4 {
+    msa_fclass_w(a)
+}
+
+/// Vector Floating-Point Class Mask
+///
+/// Store in each element of vector (two signed 64-bit integer numbers)
+/// a bit mask reflecting the floating-point class of the corresponding element of vector
+/// 'a' (two 64-bit floating point numbers).
+/// The mask has 10 bits as follows. Bits 0 and 1 indicate NaN values: signaling NaN (bit 0) and quiet NaN (bit 1).
+/// Bits 2, 3, 4, 5 classify negative values: infinity (bit 2), normal (bit 3), subnormal (bit 4), and zero (bit 5).
+/// Bits 6, 7, 8, 9 classify positive values: infinity (bit 6), normal (bit 7), subnormal (bit 8), and zero (bit 9).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fclass.d))]
+unsafe fn __msa_fclass_d(a: f64x2) -> i64x2 {
+    msa_fclass_d(a)
+}
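+
+// Helper sketch for decoding the `fclass.*` mask documented above
+// (illustrative only, not part of the MSA API); the bit positions follow
+// the documented layout, e.g. bits 0 and 1 are the two NaN classes.
+#[cfg(test)]
+fn fclass_mask_is_nan(mask: i32) -> bool {
+    // Bit 0 = signaling NaN, bit 1 = quiet NaN.
+    mask & 0b11 != 0
+}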
+
+/// Vector Floating-Point Quiet Compare Less or Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers) elements are ordered
+/// and either less than or equal to 'b' (four 32-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcle.w))]
+unsafe fn __msa_fcle_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcle_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Less or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers) elements are ordered
+/// and either less than or equal to 'b' (two 64-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcle.d))]
+unsafe fn __msa_fcle_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcle_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Less Than
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers) elements are ordered
+/// and less than 'b' (four 32-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fclt.w))]
+unsafe fn __msa_fclt_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fclt_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Less Than
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers) elements are ordered
+/// and less than 'b' (two 64-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fclt.d))]
+unsafe fn __msa_fclt_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fclt_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Not Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers) and
+/// 'b' (four 32-bit floating point numbers) elements are ordered and not equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcne.w))]
+unsafe fn __msa_fcne_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcne_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Not Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers) and
+/// 'b' (two 64-bit floating point numbers) elements are ordered and not equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcne.d))]
+unsafe fn __msa_fcne_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcne_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Ordered
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers) and
+/// 'b' (four 32-bit floating point numbers) elements are ordered, i.e. both elements are not NaN values,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcor.w))]
+unsafe fn __msa_fcor_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcor_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Ordered
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers) and
+/// 'b' (two 64-bit floating point numbers) elements are ordered, i.e. both elements are not NaN values,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcor.d))]
+unsafe fn __msa_fcor_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcor_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers) and
+/// 'b' (four 32-bit floating point numbers) elements are unordered or equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcueq.w))]
+unsafe fn __msa_fcueq_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcueq_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers) and
+/// 'b' (two 64-bit floating point numbers) elements are unordered or equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcueq.d))]
+unsafe fn __msa_fcueq_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcueq_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Less or Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding elements in 'a' (four 32-bit floating point numbers)
+/// are unordered or less than or equal to 'b' (four 32-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcule.w))]
+unsafe fn __msa_fcule_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcule_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Less or Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding elements in 'a' (two 64-bit floating point numbers)
+/// are unordered or less than or equal to 'b' (two 64-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcule.d))]
+unsafe fn __msa_fcule_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcule_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Less Than
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding elements in 'a' (four 32-bit floating point numbers)
+/// are unordered or less than 'b' (four 32-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcult.w))]
+unsafe fn __msa_fcult_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcult_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Less Than
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding elements in 'a' (two 64-bit floating point numbers)
+/// are unordered or less than 'b' (two 64-bit floating point numbers) elements,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcult.d))]
+unsafe fn __msa_fcult_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcult_d(a, b)
+}
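+
+// Illustrative scalar contrast between the quiet ordered and unordered
+// compares above (a sketch only, not part of the MSA API): with a NaN
+// operand, an ordered compare such as `fceq` yields 0 in that lane, while
+// its unordered counterpart such as `fcueq` yields the all-ones mask.
+#[cfg(test)]
+fn scalar_fceq_w_lane(a: f32, b: f32) -> i32 {
+    // NaN compares as unordered, so `==` is false and the lane is 0.
+    if a == b { -1 } else { 0 }
+}
+#[cfg(test)]
+fn scalar_fcueq_w_lane(a: f32, b: f32) -> i32 {
+    if a.is_nan() || b.is_nan() || a == b { -1 } else { 0 }
+}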
+
+/// Vector Floating-Point Quiet Compare Unordered
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers)
+/// and 'b' (four 32-bit floating point numbers) elements are unordered,
+/// i.e. at least one element is a NaN value, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcun.w))]
+unsafe fn __msa_fcun_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcun_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers)
+/// and 'b' (two 64-bit floating point numbers) elements are unordered,
+/// i.e. at least one element is a NaN value, otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcun.d))]
+unsafe fn __msa_fcun_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcun_d(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Not Equal
+///
+/// Set all bits to 1 in vector (four signed 32-bit integer numbers)
+/// elements if the corresponding 'a' (four 32-bit floating point numbers)
+/// and 'b' (four 32-bit floating point numbers) elements are unordered or not equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcune.w))]
+unsafe fn __msa_fcune_w(a: f32x4, b: f32x4) -> i32x4 {
+    msa_fcune_w(a, b)
+}
+
+/// Vector Floating-Point Quiet Compare Unordered or Not Equal
+///
+/// Set all bits to 1 in vector (two signed 64-bit integer numbers)
+/// elements if the corresponding 'a' (two 64-bit floating point numbers)
+/// and 'b' (two 64-bit floating point numbers) elements are unordered or not equal,
+/// otherwise set all bits to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fcune.d))]
+unsafe fn __msa_fcune_d(a: f64x2, b: f64x2) -> i64x2 {
+    msa_fcune_d(a, b)
+}
+
+/// Vector Floating-Point Division
+///
+/// The floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// are divided by the floating-point elements in vector 'b' (four 32-bit floating point numbers).
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fdiv.w))]
+unsafe fn __msa_fdiv_w(a: f32x4, b: f32x4) -> f32x4 {
+    msa_fdiv_w(a, b)
+}
+
+/// Vector Floating-Point Division
+///
+/// The floating-point elements in vector 'a' (two 64-bit floating point numbers)
+/// are divided by the floating-point elements in vector 'b' (two 64-bit floating point numbers).
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fdiv.d))]
+unsafe fn __msa_fdiv_d(a: f64x2, b: f64x2) -> f64x2 {
+    msa_fdiv_d(a, b)
+}
+
+/* FIXME: 16-bit float
+/// Vector Floating-Point Down-Convert Interchange Format
+///
+/// The floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// and vector 'b' (four 32-bit floating point numbers) are down-converted
+/// to a smaller interchange format, i.e. from 64-bit to 32-bit, or from 32-bit to 16-bit.
+/// The result is written to vector (eight 16-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexdo.h))]
+unsafe fn __msa_fexdo_h(a: f32x4, b: f32x4) -> f16x8 {
+    msa_fexdo_h(a, b)
+}*/
+
+/// Vector Floating-Point Down-Convert Interchange Format
+///
+/// The floating-point elements in vector 'a' (two 64-bit floating point numbers)
+/// and vector 'b' (two 64-bit floating point numbers) are down-converted
+/// to a smaller interchange format, i.e. from 64-bit to 32-bit, or from 32-bit to 16-bit.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexdo.w))]
+unsafe fn __msa_fexdo_w(a: f64x2, b: f64x2) -> f32x4 {
+    msa_fexdo_w(a, b)
+}
+
+/// Vector Floating-Point Base 2 Exponentiation
+///
+/// The floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// are scaled, i.e. multiplied, by 2 to the power of integer elements in vector 'b'
+/// (four signed 32-bit integer numbers).
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexp2.w))]
+unsafe fn __msa_fexp2_w(a: f32x4, b: i32x4) -> f32x4 {
+    msa_fexp2_w(a, b)
+}
+
+/// Vector Floating-Point Base 2 Exponentiation
+///
+/// The floating-point elements in vector 'a' (two 64-bit floating point numbers)
+/// are scaled, i.e. multiplied, by 2 to the power of integer elements in vector 'b'
+/// (two signed 64-bit integer numbers).
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexp2.d))]
+unsafe fn __msa_fexp2_d(a: f64x2, b: i64x2) -> f64x2 {
+    msa_fexp2_d(a, b)
+}
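+
+// Illustrative scalar model of `fexp2.w` (a sketch only, not part of the
+// MSA API): each lane of 'a' is multiplied by 2 raised to the corresponding
+// integer lane of 'b', like a per-lane ldexp. The power of two is built
+// from its IEEE-754 bit pattern, which is exact while `exp` stays in the
+// normal range (-126..=127).
+#[cfg(test)]
+fn scalar_fexp2_w_lane(a: f32, exp: i32) -> f32 {
+    let two_pow = f32::from_bits(((exp + 127) as u32) << 23);
+    a * two_pow
+}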
+
+/* FIXME: 16-bit float
+/// Vector Floating-Point Up-Convert Interchange Format Left
+///
+/// The left half floating-point elements in vector 'a' (eight 16-bit floating point numbers)
+/// are up-converted to a larger interchange format,
+/// i.e. from 16-bit to 32-bit, or from 32-bit to 64-bit.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexupl.w))]
+unsafe fn __msa_fexupl_w(a: f16x8) -> f32x4 {
+    msa_fexupl_w(a)
+}*/
+
+/// Vector Floating-Point Up-Convert Interchange Format Left
+///
+/// The left half floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// are up-converted to a larger interchange format,
+/// i.e. from 16-bit to 32-bit, or from 32-bit to 64-bit.
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexupl.d))]
+unsafe fn __msa_fexupl_d(a: f32x4) -> f64x2 {
+    msa_fexupl_d(a)
+}
+
+/* FIXME: 16-bit float
+/// Vector Floating-Point Up-Convert Interchange Format Right
+///
+/// The right half floating-point elements in vector 'a' (eight 16-bit floating point numbers)
+/// are up-converted to a larger interchange format,
+/// i.e. from 16-bit to 32-bit, or from 32-bit to 64-bit.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexupr.w))]
+unsafe fn __msa_fexupr_w(a: f16x8) -> f32x4 {
+    msa_fexupr_w(a)
+} */
+
+/// Vector Floating-Point Up-Convert Interchange Format Right
+///
+/// The right half floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// are up-converted to a larger interchange format,
+/// i.e. from 16-bit to 32-bit, or from 32-bit to 64-bit.
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fexupr.d))]
+unsafe fn __msa_fexupr_d(a: f32x4) -> f64x2 {
+    msa_fexupr_d(a)
+}
+
+/// Vector Floating-Point Round and Convert from Signed Integer
+///
+/// The signed integer elements in vector 'a' (four signed 32-bit integer numbers)
+/// are converted to floating-point values.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffint_s.w))]
+unsafe fn __msa_ffint_s_w(a: i32x4) -> f32x4 {
+    msa_ffint_s_w(a)
+}
+
+/// Vector Floating-Point Round and Convert from Signed Integer
+///
+/// The signed integer elements in vector 'a' (two signed 64-bit integer numbers)
+/// are converted to floating-point values.
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffint_s.d))]
+unsafe fn __msa_ffint_s_d(a: i64x2) -> f64x2 {
+    msa_ffint_s_d(a)
+}
+
+/// Vector Floating-Point Round and Convert from Unsigned Integer
+///
+/// The unsigned integer elements in vector 'a' (four unsigned 32-bit integer numbers)
+/// are converted to floating-point values.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffint_u.w))]
+unsafe fn __msa_ffint_u_w(a: u32x4) -> f32x4 {
+    msa_ffint_u_w(a)
+}
+
+/// Vector Floating-Point Round and Convert from Unsigned Integer
+///
+/// The unsigned integer elements in vector 'a' (two unsigned 64-bit integer numbers)
+/// are converted to floating-point values.
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffint_u.d))]
+unsafe fn __msa_ffint_u_d(a: u64x2) -> f64x2 {
+    msa_ffint_u_d(a)
+}
+
+/// Vector Floating-Point Convert from Fixed-Point Left
+///
+/// The left half fixed-point elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are up-converted to floating-point data format,
+/// i.e. from 16-bit Q15 to 32-bit floating-point, or from 32-bit Q31 to 64-bit floating-point.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffql.w))]
+unsafe fn __msa_ffql_w(a: i16x8) -> f32x4 {
+    msa_ffql_w(a)
+}
+
+/// Vector Floating-Point Convert from Fixed-Point Left
+///
+/// The left half fixed-point elements in vector 'a' (four signed 32-bit integer numbers)
+/// are up-converted to floating-point data format,
+/// i.e. from 16-bit Q15 to 32-bit floating-point, or from 32-bit Q31 to 64-bit floating-point.
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffql.d))]
+unsafe fn __msa_ffql_d(a: i32x4) -> f64x2 {
+    msa_ffql_d(a)
+}
+
+/// Vector Floating-Point Convert from Fixed-Point Right
+///
+/// The right half fixed-point elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are up-converted to floating-point data format,
+/// i.e. from 16-bit Q15 to 32-bit floating-point, or from 32-bit Q31 to 64-bit floating-point.
+/// The result is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffqr.w))]
+unsafe fn __msa_ffqr_w(a: i16x8) -> f32x4 {
+    msa_ffqr_w(a)
+}
+
+/// Vector Floating-Point Convert from Fixed-Point Right
+///
+/// The right half fixed-point elements in vector 'a' (four signed 32-bit integer numbers)
+/// are up-converted to floating-point data format,
+/// i.e. from 16-bit Q15 to 32-bit floating-point, or from 32-bit Q31 to 64-bit floating-point.
+/// The result is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(ffqr.d))]
+unsafe fn __msa_ffqr_d(a: i32x4) -> f64x2 {
+    msa_ffqr_d(a)
+}
+
+/// Vector Fill from GPR
+///
+/// Replicate GPR rs value to all elements in vector (sixteen signed 8-bit integer numbers).
+/// If the source GPR is wider than the destination data format, the destination's elements
+/// will be set to the least significant bits of the GPR.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fill.b))]
+unsafe fn __msa_fill_b(a: i32) -> i8x16 {
+    msa_fill_b(a)
+}
+
+/// Vector Fill from GPR
+///
+/// Replicate GPR rs value to all elements in vector (eight signed 16-bit integer numbers).
+/// If the source GPR is wider than the destination data format, the destination's elements
+/// will be set to the least significant bits of the GPR.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fill.h))]
+unsafe fn __msa_fill_h(a: i32) -> i16x8 {
+    msa_fill_h(a)
+}
+
+/// Vector Fill from GPR
+///
+/// Replicate GPR rs value to all elements in vector (four signed 32-bit integer numbers).
+/// If the source GPR is wider than the destination data format, the destination's elements
+/// will be set to the least significant bits of the GPR.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fill.w))]
+unsafe fn __msa_fill_w(a: i32) -> i32x4 {
+    msa_fill_w(a)
+}
+
+/// Vector Fill from GPR
+///
+/// Replicate GPR rs value to all elements in vector (two signed 64-bit integer numbers).
+/// If the source GPR is wider than the destination data format, the destination's elements
+/// will be set to the least significant bits of the GPR.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fill.d))]
+unsafe fn __msa_fill_d(a: i64) -> i64x2 {
+    msa_fill_d(a)
+}
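+
+// Illustrative scalar model of `fill.b` (a sketch only, not part of the
+// MSA API): the least significant 8 bits of the GPR value are replicated
+// into every lane of the result.
+#[cfg(test)]
+fn scalar_fill_b_model(a: i32) -> [i8; 16] {
+    // `as i8` keeps exactly the least significant byte.
+    [a as i8; 16]
+}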
+
+/// Vector Floating-Point Base 2 Logarithm
+///
+/// The signed integral base 2 exponents of floating-point elements in vector 'a'
+/// (four 32-bit floating point numbers) are written as floating-point values to vector elements
+/// (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(flog2.w))]
+unsafe fn __msa_flog2_w(a: f32x4) -> f32x4 {
+    msa_flog2_w(a)
+}
+
+/// Vector Floating-Point Base 2 Logarithm
+///
+/// The signed integral base 2 exponents of floating-point elements in vector 'a'
+/// (two 64-bit floating point numbers) are written as floating-point values to vector elements
+/// (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(flog2.d))]
+unsafe fn __msa_flog2_d(a: f64x2) -> f64x2 {
+    msa_flog2_d(a)
+}
+
+/// Vector Floating-Point Multiply-Add
+///
+/// The floating-point elements in vector 'b' (four 32-bit floating point numbers)
+/// multiplied by floating-point elements in vector 'c' (four 32-bit floating point numbers)
+/// are added to the floating-point elements in vector 'a' (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmadd.w))]
+unsafe fn __msa_fmadd_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4 {
+    msa_fmadd_w(a, b, c)
+}
+
+/// Vector Floating-Point Multiply-Add
+///
+/// The floating-point elements in vector 'b' (two 64-bit floating point numbers)
+/// multiplied by floating-point elements in vector 'c' (two 64-bit floating point numbers)
+/// are added to the floating-point elements in vector 'a' (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmadd.d))]
+unsafe fn __msa_fmadd_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2 {
+    msa_fmadd_d(a, b, c)
+}
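+
+// Illustrative scalar model of `fmadd.w` (a sketch only, not part of the
+// MSA API): per lane, 'a' plus the product of 'b' and 'c'. Note that the
+// hardware performs a fused multiply-add with a single rounding step,
+// which this two-step model does not capture.
+#[cfg(test)]
+fn scalar_fmadd_w_lane(a: f32, b: f32, c: f32) -> f32 {
+    a + b * c
+}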
+
+/// Vector Floating-Point Maximum
+///
+/// The largest values between corresponding floating-point elements in vector 'a'
+/// (four 32-bit floating point numbers) and vector 'b' (four 32-bit floating point numbers)
+/// are written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmax.w))]
+unsafe fn __msa_fmax_w(a: f32x4, b: f32x4) -> f32x4 {
+    msa_fmax_w(a, b)
+}
+
+/// Vector Floating-Point Maximum
+///
+/// The largest values between corresponding floating-point elements in vector 'a'
+/// (two 64-bit floating point numbers) and vector 'b' (two 64-bit floating point numbers)
+/// are written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmax.d))]
+unsafe fn __msa_fmax_d(a: f64x2, b: f64x2) -> f64x2 {
+    msa_fmax_d(a, b)
+}
+
+/// Vector Floating-Point Maximum Based on Absolute Values
+///
+/// The value with the largest magnitude, i.e. absolute value, between corresponding
+/// floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// and vector 'b' (four 32-bit floating point numbers)
+/// is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmax_a.w))]
+unsafe fn __msa_fmax_a_w(a: f32x4, b: f32x4) -> f32x4 {
+    msa_fmax_a_w(a, b)
+}
+
+/// Vector Floating-Point Maximum Based on Absolute Values
+///
+/// The value with the largest magnitude, i.e. absolute value, between corresponding
+/// floating-point elements in vector 'a' (two 64-bit floating point numbers)
+/// and vector 'b' (two 64-bit floating point numbers)
+/// is written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmax_a.d))]
+unsafe fn __msa_fmax_a_d(a: f64x2, b: f64x2) -> f64x2 {
+    msa_fmax_a_d(a, b)
+}
+
+/// Vector Floating-Point Minimum
+///
+/// The smallest values between corresponding floating-point elements in vector 'a'
+/// (four 32-bit floating point numbers) and vector 'b' (four 32-bit floating point numbers)
+/// are written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmin.w))]
+unsafe fn __msa_fmin_w(a: f32x4, b: f32x4) -> f32x4 {
+    msa_fmin_w(a, b)
+}
+
+/// Vector Floating-Point Minimum
+///
+/// The smallest values between corresponding floating-point elements in vector 'a'
+/// (two 64-bit floating point numbers) and vector 'b' (two 64-bit floating point numbers)
+/// are written to vector (two 64-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmin.d))]
+unsafe fn __msa_fmin_d(a: f64x2, b: f64x2) -> f64x2 {
+    msa_fmin_d(a, b)
+}
+
+/// Vector Floating-Point Minimum Based on Absolute Values
+///
+/// The value with the smallest magnitude, i.e. absolute value, between corresponding
+/// floating-point elements in vector 'a' (four 32-bit floating point numbers)
+/// and vector 'b' (four 32-bit floating point numbers)
+/// is written to vector (four 32-bit floating point numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(fmin_a.w))]
+unsafe fn __msa_fmin_a_w(a: f32x4, b: f32x4) -> f32x4 {
+    msa_fmin_a_w(a, b)
+}
absolute value, between corresponding +/// floating-point elements in vector 'a' (two 64-bit floating point numbers) +/// and vector 'b' (two 64-bit floating point numbers) +/// are written to vector (two 64-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fmin_a.d))] +unsafe fn __msa_fmin_a_d(a: f64x2, b: f64x2) -> f64x2 { + msa_fmin_a_d(a, b) +} + +/// Vector Floating-Point Multiply-Sub +/// +/// The floating-point elements in vector 'b' (four 32-bit floating point numbers) +/// multiplied by floating-point elements in vector 'c' (four 32-bit floating point numbers) +/// are subtracted from the floating-point elements in vector 'a' (four 32-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fmsub.w))] +unsafe fn __msa_fmsub_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4 { + msa_fmsub_w(a, b, c) +} + +/// Vector Floating-Point Multiply-Sub +/// +/// The floating-point elements in vector 'b' (two 64-bit floating point numbers) +/// multiplied by floating-point elements in vector 'c' (two 64-bit floating point numbers) +/// are subtracted from the floating-point elements in vector 'a' (two 64-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fmsub.d))] +unsafe fn __msa_fmsub_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2 { + msa_fmsub_d(a, b, c) +} + + +/// Vector Floating-Point Multiplication +/// +/// The floating-point elements in vector 'a' (four 32-bit floating point numbers) are +/// multiplied by floating-point elements in vector 'b' (four 32-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fmul.w))] +unsafe fn __msa_fmul_w(a: f32x4, b: f32x4) -> f32x4 { + msa_fmul_w(a, b) +} + +/// Vector Floating-Point Multiplication +/// +/// The floating-point elements in vector 'a' (two 64-bit floating point numbers) are +/// multiplied by floating-point elements in vector 'b' (two 64-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fmul.d))] +unsafe fn __msa_fmul_d(a: f64x2, b: f64x2) -> f64x2 { + msa_fmul_d(a, b) +} + +/// Vector Floating-Point Round to Integer +/// +/// The floating-point elements in vector 'a' (four 32-bit floating point numbers) +/// are rounded to an integral valued floating-point number in the same format based +/// on the rounding mode bits RM in MSA Control and Status Register MSACSR. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(frint.w))] +unsafe fn __msa_frint_w(a: f32x4) -> f32x4 { + msa_frint_w(a) +} + +/// Vector Floating-Point Round to Integer +/// +/// The floating-point elements in vector 'a' (two 64-bit floating point numbers) +/// are rounded to an integral valued floating-point number in the same format based +/// on the rounding mode bits RM in MSA Control and Status Register MSACSR. 
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(frint.d))] +unsafe fn __msa_frint_d(a: f64x2) -> f64x2 { + msa_frint_d(a) +} + +/// Vector Approximate Floating-Point Reciprocal +/// +/// The reciprocals of floating-point elements in vector 'a' (four 32-bit floating point numbers) +/// are calculated and the result is written to vector (four 32-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(frcp.w))] +unsafe fn __msa_frcp_w(a: f32x4) -> f32x4 { + msa_frcp_w(a) +} + +/// Vector Approximate Floating-Point Reciprocal +/// +/// The reciprocals of floating-point elements in vector 'a' (two 64-bit floating point numbers) +/// are calculated and the result is written to vector (two 64-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(frcp.d))] +unsafe fn __msa_frcp_d(a: f64x2) -> f64x2 { + msa_frcp_d(a) +} + +/// Vector Approximate Floating-Point Reciprocal of Square Root +/// +/// The reciprocals of the square roots of floating-point elements in vector 'a' (four 32-bit floating point numbers) +/// are calculated and the result is written to vector (four 32-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(frsqrt.w))] +unsafe fn __msa_frsqrt_w(a: f32x4) -> f32x4 { + msa_frsqrt_w(a) +} + +/// Vector Approximate Floating-Point Reciprocal of Square Root +/// +/// The reciprocals of the square roots of floating-point elements in vector 'a' (two 64-bit floating point numbers) +/// are calculated and the result is written to vector (two 64-bit floating point numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(frsqrt.d))] +unsafe fn __msa_frsqrt_d(a: f64x2) -> f64x2 { + msa_frsqrt_d(a) +} + +/// Vector Floating-Point Signaling Compare Always False +/// +/// Set all bits to 0 in vector (four signed 32-bit integer numbers) elements. +/// Signaling and quiet NaN elements in vector 'a' (four 32-bit floating point numbers) +/// or 'b' (four 32-bit floating point numbers) signal Invalid Operation exception. +/// In case of a floating-point exception, the default result has all bits set to 0 +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsaf.w))] +unsafe fn __msa_fsaf_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsaf_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Always False +/// +/// Set all bits to 0 in vector (two signed 64-bit integer numbers) elements. +/// Signaling and quiet NaN elements in vector 'a' (two 64-bit floating point numbers) +/// or 'b' (two 64-bit floating point numbers) signal Invalid Operation exception. +/// In case of a floating-point exception, the default result has all bits set to 0 +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsaf.d))] +unsafe fn __msa_fsaf_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsaf_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Equal +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) +/// and 'b' (four 32-bit floating point numbers) elements are equal, otherwise set all bits to 0. 
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fseq.w))] +unsafe fn __msa_fseq_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fseq_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Equal +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) +/// and 'b' (two 64-bit floating point numbers) elements are equal, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fseq.d))] +unsafe fn __msa_fseq_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fseq_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Less or Equal +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements +/// are less than or equal to 'b' (four 32-bit floating point numbers) elements, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsle.w))] +unsafe fn __msa_fsle_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsle_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Less or Equal +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) elements +/// are less than or equal to 'b' (two 64-bit floating point numbers) elements, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsle.d))] +unsafe fn __msa_fsle_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsle_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Less Than +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements +/// are less than 'b' (four 32-bit floating point numbers) elements, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fslt.w))] +unsafe fn __msa_fslt_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fslt_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Less Than +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) elements +/// are less than 'b' (two 64-bit floating point numbers) elements, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fslt.d))] +unsafe fn __msa_fslt_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fslt_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Not Equal +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are not equal, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsne.w))] +unsafe fn __msa_fsne_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsne_w(a, b) +} +
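+// NOTE: an illustrative sketch (hypothetical, not added to the tests): the
+// signaling compares return integer vectors usable as bit masks.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn fseq_w_example() -> i32x4 {
+    let a = f32x4::new(1.0, 2.0, 3.0, 4.0);
+    let b = f32x4::new(1.0, 9.0, 3.0, 9.0);
+    // Lanes that compare equal are set to all ones (-1), others to 0:
+    // (-1, 0, -1, 0)
+    __msa_fseq_w(a, b)
+}
+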
+/// Vector Floating-Point Signaling Compare Not Equal +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) and +/// 'b' (two 64-bit floating point numbers) elements are not equal, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsne.d))] +unsafe fn __msa_fsne_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsne_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Ordered +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are ordered, +/// i.e. both elements are not NaN values, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsor.w))] +unsafe fn __msa_fsor_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsor_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Ordered +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) and +/// 'b' (two 64-bit floating point numbers) elements are ordered, +/// i.e. both elements are not NaN values, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsor.d))] +unsafe fn __msa_fsor_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsor_d(a, b) +} + +/// Vector Floating-Point Square Root +/// +/// The square roots of floating-point elements in vector 'a' +/// (four 32-bit floating point numbers) are written to vector +/// (four 32-bit floating point numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsqrt.w))] +unsafe fn __msa_fsqrt_w(a: f32x4) -> f32x4 { + msa_fsqrt_w(a) +} + +/// Vector Floating-Point Square Root +/// +/// The square roots of floating-point elements in vector 'a' +/// (two 64-bit floating point numbers) are written to vector +/// (two 64-bit floating point numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsqrt.d))] +unsafe fn __msa_fsqrt_d(a: f64x2) -> f64x2 { + msa_fsqrt_d(a) +} + +/// Vector Floating-Point Subtraction +/// +/// The floating-point elements in vector 'b' (four 32-bit floating point numbers) +/// are subtracted from the floating-point elements in vector 'a' +/// (four 32-bit floating point numbers). +/// The result is written to vector (four 32-bit floating point numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsub.w))] +unsafe fn __msa_fsub_w(a: f32x4, b: f32x4) -> f32x4 { + msa_fsub_w(a, b) +} + +/// Vector Floating-Point Subtraction +/// +/// The floating-point elements in vector 'b' (two 64-bit floating point numbers) +/// are subtracted from the floating-point elements in vector 'a' +/// (two 64-bit floating point numbers). +/// The result is written to vector (two 64-bit floating point numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsub.d))] +unsafe fn __msa_fsub_d(a: f64x2, b: f64x2) -> f64x2 { + msa_fsub_d(a, b) +} +
+/// Vector Floating-Point Signaling Compare Unordered or Equal +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered or equal, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsueq.w))] +unsafe fn __msa_fsueq_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsueq_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Equal +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) and +/// 'b' (two 64-bit floating point numbers) elements are unordered or equal, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsueq.d))] +unsafe fn __msa_fsueq_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsueq_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Less or Equal +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements are +/// unordered or less than or equal to 'b' (four 32-bit floating point numbers) elements, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsule.w))] +unsafe fn __msa_fsule_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsule_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Less or Equal +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) elements are +/// unordered or less than or equal to 'b' (two 64-bit floating point numbers) elements, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsule.d))] +unsafe fn __msa_fsule_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsule_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Less Than +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements +/// are unordered or less than 'b' (four 32-bit floating point numbers) elements, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsult.w))] +unsafe fn __msa_fsult_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsult_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Less Than +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) elements +/// are unordered or less than 'b' (two 64-bit floating point numbers) elements, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsult.d))] +unsafe fn __msa_fsult_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsult_d(a, b) +} +
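+// NOTE: an illustrative sketch (hypothetical, not added to the tests) of the
+// difference between the ordered and unordered compare variants; with the
+// default non-trapping floating-point environment a NaN lane yields 0 for
+// fsle but all ones for fsule.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn fsule_w_example() -> i32x4 {
+    let nan = 0.0f32 / 0.0;
+    let a = f32x4::new(1.0, nan, 3.0, 4.0);
+    let b = f32x4::new(2.0, 2.0, 2.0, 4.0);
+    // (-1, -1, 0, -1): the NaN lane is unordered, hence true for fsule
+    __msa_fsule_w(a, b)
+}
+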
+/// Vector Floating-Point Signaling Compare Unordered +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered, +/// i.e. at least one element is a NaN value, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsun.w))] +unsafe fn __msa_fsun_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsun_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) and +/// 'b' (two 64-bit floating point numbers) elements are unordered, +/// i.e. at least one element is a NaN value, otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsun.d))] +unsafe fn __msa_fsun_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsun_d(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Not Equal +/// +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered or not equal, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsune.w))] +unsafe fn __msa_fsune_w(a: f32x4, b: f32x4) -> i32x4 { + msa_fsune_w(a, b) +} + +/// Vector Floating-Point Signaling Compare Unordered or Not Equal +/// +/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements +/// if the corresponding 'a' (two 64-bit floating point numbers) and +/// 'b' (two 64-bit floating point numbers) elements are unordered or not equal, +/// otherwise set all bits to 0. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(fsune.d))] +unsafe fn __msa_fsune_d(a: f64x2, b: f64x2) -> i64x2 { + msa_fsune_d(a, b) +} + +/// Vector Floating-Point Convert to Signed Integer +/// +/// The elements in vector 'a' (four 32-bit floating point numbers) +/// are rounded and converted to signed integer values based on the +/// rounding mode bits RM in MSA Control and Status Register MSACSR. +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftint_s.w))] +unsafe fn __msa_ftint_s_w(a: f32x4) -> i32x4 { + msa_ftint_s_w(a) +} + +/// Vector Floating-Point Convert to Signed Integer +/// +/// The elements in vector 'a' (two 64-bit floating point numbers) +/// are rounded and converted to signed integer values based on the +/// rounding mode bits RM in MSA Control and Status Register MSACSR. +/// The result is written to vector (two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftint_s.d))] +unsafe fn __msa_ftint_s_d(a: f64x2) -> i64x2 { + msa_ftint_s_d(a) +} + +/// Vector Floating-Point Convert to Unsigned Integer +/// +/// The elements in vector 'a' (four 32-bit floating point numbers) +/// are rounded and converted to unsigned integer values based on the +/// rounding mode bits RM in MSA Control and Status Register MSACSR. +/// The result is written to vector (four unsigned 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftint_u.w))] +unsafe fn __msa_ftint_u_w(a: f32x4) -> u32x4 { + msa_ftint_u_w(a) +} +
+/// Vector Floating-Point Convert to Unsigned Integer +/// +/// The elements in vector 'a' (two 64-bit floating point numbers) +/// are rounded and converted to unsigned integer values based on the +/// rounding mode bits RM in MSA Control and Status Register MSACSR. +/// The result is written to vector (two unsigned 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftint_u.d))] +unsafe fn __msa_ftint_u_d(a: f64x2) -> u64x2 { + msa_ftint_u_d(a) +} + +/// Vector Floating-Point Convert to Fixed-Point +/// +/// The elements in vector 'a' (four 32-bit floating point numbers) +/// and 'b' (four 32-bit floating point numbers) are down-converted to a fixed-point +/// representation, i.e. from 32-bit floating-point to 16-bit Q15 fixed-point representation. +/// The result is written to vector (eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftq.h))] +unsafe fn __msa_ftq_h(a: f32x4, b: f32x4) -> i16x8 { + msa_ftq_h(a, b) +} + +/// Vector Floating-Point Convert to Fixed-Point +/// +/// The elements in vector 'a' (two 64-bit floating point numbers) +/// and 'b' (two 64-bit floating point numbers) are down-converted to a fixed-point +/// representation, i.e. from 64-bit floating-point to 32-bit Q31 fixed-point representation. +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftq.w))] +unsafe fn __msa_ftq_w(a: f64x2, b: f64x2) -> i32x4 { + msa_ftq_w(a, b) +} + +/// Vector Floating-Point Truncate and Convert to Signed Integer +/// +/// The elements in vector 'a' (four 32-bit floating point numbers) +/// are truncated, i.e. rounded toward zero, to signed integer values. +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftrunc_s.w))] +unsafe fn __msa_ftrunc_s_w(a: f32x4) -> i32x4 { + msa_ftrunc_s_w(a) +} + +/// Vector Floating-Point Truncate and Convert to Signed Integer +/// +/// The elements in vector 'a' (two 64-bit floating point numbers) +/// are truncated, i.e. rounded toward zero, to signed integer values. +/// The result is written to vector (two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftrunc_s.d))] +unsafe fn __msa_ftrunc_s_d(a: f64x2) -> i64x2 { + msa_ftrunc_s_d(a) +} + +/// Vector Floating-Point Truncate and Convert to Unsigned Integer +/// +/// The elements in vector 'a' (four 32-bit floating point numbers) +/// are truncated, i.e. rounded toward zero, to unsigned integer values. +/// The result is written to vector (four unsigned 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftrunc_u.w))] +unsafe fn __msa_ftrunc_u_w(a: f32x4) -> u32x4 { + msa_ftrunc_u_w(a) +} + +/// Vector Floating-Point Truncate and Convert to Unsigned Integer +/// +/// The elements in vector 'a' (two 64-bit floating point numbers) +/// are truncated, i.e. rounded toward zero, to unsigned integer values. +/// The result is written to vector (two unsigned 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ftrunc_u.d))] +unsafe fn __msa_ftrunc_u_d(a: f64x2) -> u64x2 { + msa_ftrunc_u_d(a) +} +
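+// NOTE: an illustrative sketch (hypothetical, not added to the tests)
+// contrasting the two float-to-int conversions defined above.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn ftrunc_s_w_example() -> i32x4 {
+    let a = f32x4::new(1.9, -1.9, 2.5, -2.5);
+    // Truncation always rounds toward zero: (1, -1, 2, -2);
+    // __msa_ftint_s_w would instead honor the MSACSR rounding mode.
+    __msa_ftrunc_s_w(a)
+}
+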
+/// Vector Signed Horizontal Add +/// +/// The sign-extended odd elements in vector 'a' (sixteen signed 8-bit integer numbers) +/// are added to the sign-extended even elements in vector 'b' (sixteen signed 8-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hadd_s.h))] +unsafe fn __msa_hadd_s_h(a: i8x16, b: i8x16) -> i16x8 { + msa_hadd_s_h(a, b) +} + +/// Vector Signed Horizontal Add +/// +/// The sign-extended odd elements in vector 'a' (eight signed 16-bit integer numbers) +/// are added to the sign-extended even elements in vector 'b' (eight signed 16-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hadd_s.w))] +unsafe fn __msa_hadd_s_w(a: i16x8, b: i16x8) -> i32x4 { + msa_hadd_s_w(a, b) +} + +/// Vector Signed Horizontal Add +/// +/// The sign-extended odd elements in vector 'a' (four signed 32-bit integer numbers) +/// are added to the sign-extended even elements in vector 'b' (four signed 32-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hadd_s.d))] +unsafe fn __msa_hadd_s_d(a: i32x4, b: i32x4) -> i64x2 { + msa_hadd_s_d(a, b) +} + +/// Vector Unsigned Horizontal Add +/// +/// The zero-extended odd elements in vector 'a' (sixteen unsigned 8-bit integer numbers) +/// are added to the zero-extended even elements in vector 'b' (sixteen unsigned 8-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (eight unsigned 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hadd_u.h))] +unsafe fn __msa_hadd_u_h(a: u8x16, b: u8x16) -> u16x8 { + msa_hadd_u_h(a, b) +} + +/// Vector Unsigned Horizontal Add +/// +/// The zero-extended odd elements in vector 'a' (eight unsigned 16-bit integer numbers) +/// are added to the zero-extended even elements in vector 'b' (eight unsigned 16-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (four unsigned 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hadd_u.w))] +unsafe fn __msa_hadd_u_w(a: u16x8, b: u16x8) -> u32x4 { + msa_hadd_u_w(a, b) +} + +/// Vector Unsigned Horizontal Add +/// +/// The zero-extended odd elements in vector 'a' (four unsigned 32-bit integer numbers) +/// are added to the zero-extended even elements in vector 'b' (four unsigned 32-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (two unsigned 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hadd_u.d))] +unsafe fn __msa_hadd_u_d(a: u32x4, b: u32x4) -> u64x2 { + msa_hadd_u_d(a, b) +} +
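+// NOTE: an illustrative sketch (hypothetical, not added to the tests): the
+// horizontal add pairs the odd (higher-indexed) lane of 'a' with the even
+// lane of 'b', i.e. result[i] = a[2*i + 1] + b[2*i], widened to i16.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn hadd_s_h_example() -> i16x8 {
+    let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+    let b = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
+    // (3, 5, 7, 9, 11, 13, 15, 17)
+    __msa_hadd_s_h(a, b)
+}
+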
+/// Vector Signed Horizontal Subtract +/// +/// The sign-extended even elements in vector 'b' (sixteen signed 8-bit integer numbers) +/// are subtracted from the sign-extended odd elements in vector 'a' (sixteen signed 8-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hsub_s.h))] +unsafe fn __msa_hsub_s_h(a: i8x16, b: i8x16) -> i16x8 { + msa_hsub_s_h(a, b) +} + +/// Vector Signed Horizontal Subtract +/// +/// The sign-extended even elements in vector 'b' (eight signed 16-bit integer numbers) +/// are subtracted from the sign-extended odd elements in vector 'a' (eight signed 16-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hsub_s.w))] +unsafe fn __msa_hsub_s_w(a: i16x8, b: i16x8) -> i32x4 { + msa_hsub_s_w(a, b) +} + +/// Vector Signed Horizontal Subtract +/// +/// The sign-extended even elements in vector 'b' (four signed 32-bit integer numbers) +/// are subtracted from the sign-extended odd elements in vector 'a' (four signed 32-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hsub_s.d))] +unsafe fn __msa_hsub_s_d(a: i32x4, b: i32x4) -> i64x2 { + msa_hsub_s_d(a, b) +} + +/// Vector Unsigned Horizontal Subtract +/// +/// The zero-extended even elements in vector 'b' (sixteen unsigned 8-bit integer numbers) +/// are subtracted from the zero-extended odd elements in vector 'a' (sixteen unsigned 8-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hsub_u.h))] +unsafe fn __msa_hsub_u_h(a: u8x16, b: u8x16) -> i16x8 { + msa_hsub_u_h(a, b) +} + +/// Vector Unsigned Horizontal Subtract +/// +/// The zero-extended even elements in vector 'b' (eight unsigned 16-bit integer numbers) +/// are subtracted from the zero-extended odd elements in vector 'a' (eight unsigned 16-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hsub_u.w))] +unsafe fn __msa_hsub_u_w(a: u16x8, b: u16x8) -> i32x4 { + msa_hsub_u_w(a, b) +} + +/// Vector Unsigned Horizontal Subtract +/// +/// The zero-extended even elements in vector 'b' (four unsigned 32-bit integer numbers) +/// are subtracted from the zero-extended odd elements in vector 'a' (four unsigned 32-bit integer numbers) +/// producing a result twice the size of the input operands. +/// The result is written to vector (two signed 64-bit integer numbers).
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(hsub_u.d))] +unsafe fn __msa_hsub_u_d(a: u32x4, b: u32x4) -> i64x2 { + msa_hsub_u_d(a, b) +} + +/// Vector Interleave Even +/// +/// Even elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// and vector 'b' (sixteen signed 8-bit integer numbers) are copied to the result +/// (sixteen signed 8-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvev.b))] +unsafe fn __msa_ilvev_b(a: i8x16, b: i8x16) -> i8x16 { + msa_ilvev_b(a, b) +} + +/// Vector Interleave Even +/// +/// Even elements in vectors 'a' (eight signed 16-bit integer numbers) +/// and vector 'b' (eight signed 16-bit integer numbers) are copied to the result +/// (eight signed 16-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvev.h))] +unsafe fn __msa_ilvev_h(a: i16x8, b: i16x8) -> i16x8 { + msa_ilvev_h(a, b) +} + +/// Vector Interleave Even +/// +/// Even elements in vectors 'a' (four signed 32-bit integer numbers) +/// and vector 'b' (four signed 32-bit integer numbers) are copied to the result +/// (four signed 32-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvev.w))] +unsafe fn __msa_ilvev_w(a: i32x4, b: i32x4) -> i32x4 { + msa_ilvev_w(a, b) +} + +/// Vector Interleave Even +/// +/// Even elements in vectors 'a' (two signed 64-bit integer numbers) +/// and vector 'b' (two signed 64-bit integer numbers) are copied to the result +/// (two signed 64-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvev.d))] +unsafe fn __msa_ilvev_d(a: i64x2, b: i64x2) -> i64x2 { + msa_ilvev_d(a, b) +} + +/// Vector Interleave Left +/// +/// The left half elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// and vector 'b' (sixteen signed 8-bit integer numbers) are copied to the result +/// (sixteen signed 8-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvl.b))] +unsafe fn __msa_ilvl_b(a: i8x16, b: i8x16) -> i8x16 { + msa_ilvl_b(a, b) +} + +/// Vector Interleave Left +/// +/// The left half elements in vectors 'a' (eight signed 16-bit integer numbers) +/// and vector 'b' (eight signed 16-bit integer numbers) are copied to the result +/// (eight signed 16-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvl.h))] +unsafe fn __msa_ilvl_h(a: i16x8, b: i16x8) -> i16x8 { + msa_ilvl_h(a, b) +} + +/// Vector Interleave Left +/// +/// The left half elements in vectors 'a' (four signed 32-bit integer numbers) +/// and vector 'b' (four signed 32-bit integer numbers) are copied to the result +/// (four signed 32-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. 
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvl.w))] +unsafe fn __msa_ilvl_w(a: i32x4, b: i32x4) -> i32x4 { + msa_ilvl_w(a, b) +} + +/// Vector Interleave Left +/// +/// The left half elements in vectors 'a' (two signed 64-bit integer numbers) +/// and vector 'b' (two signed 64-bit integer numbers) are copied to the result +/// (two signed 64-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvl.d))] +unsafe fn __msa_ilvl_d(a: i64x2, b: i64x2) -> i64x2 { + msa_ilvl_d(a, b) +} + +/// Vector Interleave Odd +/// +/// Odd elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// and vector 'b' (sixteen signed 8-bit integer numbers) are copied to the result +/// (sixteen signed 8-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvod.b))] +unsafe fn __msa_ilvod_b(a: i8x16, b: i8x16) -> i8x16 { + msa_ilvod_b(a, b) +} + +/// Vector Interleave Odd +/// +/// Odd elements in vectors 'a' (eight signed 16-bit integer numbers) +/// and vector 'b' (eight signed 16-bit integer numbers) are copied to the result +/// (eight signed 16-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvod.h))] +unsafe fn __msa_ilvod_h(a: i16x8, b: i16x8) -> i16x8 { + msa_ilvod_h(a, b) +} + +/// Vector Interleave Odd +/// +/// Odd elements in vectors 'a' (four signed 32-bit integer numbers) +/// and vector 'b' (four signed 32-bit integer numbers) are copied to the result +/// (four signed 32-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvod.w))] +unsafe fn __msa_ilvod_w(a: i32x4, b: i32x4) -> i32x4 { + msa_ilvod_w(a, b) +} + +/// Vector Interleave Odd +/// +/// Odd elements in vectors 'a' (two signed 64-bit integer numbers) +/// and vector 'b' (two signed 64-bit integer numbers) are copied to the result +/// (two signed 64-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvod.d))] +unsafe fn __msa_ilvod_d(a: i64x2, b: i64x2) -> i64x2 { + msa_ilvod_d(a, b) +} + +/// Vector Interleave Right +/// +/// The right half elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// and vector 'b' (sixteen signed 8-bit integer numbers) are copied to the result +/// (sixteen signed 8-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvr.b))] +unsafe fn __msa_ilvr_b(a: i8x16, b: i8x16) -> i8x16 { + msa_ilvr_b(a, b) +} + +/// Vector Interleave Right +/// +/// The right half elements in vectors 'a' (eight signed 16-bit integer numbers) +/// and vector 'b' (eight signed 16-bit integer numbers) are copied to the result +/// (eight signed 16-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. 
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvr.h))] +unsafe fn __msa_ilvr_h(a: i16x8, b: i16x8) -> i16x8 { + msa_ilvr_h(a, b) +} + +/// Vector Interleave Right +/// +/// The right half elements in vectors 'a' (four signed 32-bit integer numbers) +/// and vector 'b' (four signed 32-bit integer numbers) are copied to the result +/// (four signed 32-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvr.w))] +unsafe fn __msa_ilvr_w(a: i32x4, b: i32x4) -> i32x4 { + msa_ilvr_w(a, b) +} + +/// Vector Interleave Right +/// +/// The right half elements in vectors 'a' (two signed 64-bit integer numbers) +/// and vector 'b' (two signed 64-bit integer numbers) are copied to the result +/// (two signed 64-bit integer numbers) +/// alternating one element from 'a' with one element from 'b'. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ilvr.d))] +unsafe fn __msa_ilvr_d(a: i64x2, b: i64x2) -> i64x2 { + msa_ilvr_d(a, b) +} + +/// GPR Insert Element +/// +/// Set element imm4 in vector 'a' (sixteen signed 8-bit integer numbers) to GPR 'c' value. +/// All other elements in vector 'a' are unchanged. If the source GPR is wider than the +/// destination data format, the destination's elements will be set to the least significant bits of the GPR. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insert.b, imm4 = 0b1111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insert_b(a: i8x16, imm4: i32, c: i32) -> i8x16 { + macro_rules! call { + ($imm4:expr) => { + msa_insert_b(a, $imm4, c) + }; + } + constify_imm4!(imm4, call) +} + +/// GPR Insert Element +/// +/// Set element imm3 in vector 'a' (eight signed 16-bit integer numbers) to GPR 'c' value. +/// All other elements in vector 'a' are unchanged. If the source GPR is wider than the +/// destination data format, the destination's elements will be set to the least significant bits of the GPR. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insert.h, imm3 = 0b111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insert_h(a: i16x8, imm3: i32, c: i32) -> i16x8 { + macro_rules! call { + ($imm3:expr) => { + msa_insert_h(a, $imm3, c) + }; + } + constify_imm3!(imm3, call) +} + +/// GPR Insert Element +/// +/// Set element imm2 in vector 'a' (four signed 32-bit integer numbers) to GPR 'c' value. +/// All other elements in vector 'a' are unchanged. If the source GPR is wider than the +/// destination data format, the destination's elements will be set to the least significant bits of the GPR. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insert.w, imm2 = 0b11))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insert_w(a: i32x4, imm2: i32, c: i32) -> i32x4 { + macro_rules! call { + ($imm2:expr) => { + msa_insert_w(a, $imm2, c) + }; + } + constify_imm2!(imm2, call) +} + +/// GPR Insert Element +/// +/// Set element imm1 in vector 'a' (two signed 64-bit integer numbers) to GPR 'c' value. +/// All other elements in vector 'a' are unchanged. If the source GPR is wider than the +/// destination data format, the destination's elements will be set to the least significant bits of the GPR. 
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insert.d, imm1 = 0b1))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insert_d(a: i64x2, imm1: i32, c: i64) -> i64x2 { + macro_rules! call { + ($imm1:expr) => { + msa_insert_d(a, $imm1, c) + }; + } + constify_imm1!(imm1, call) +} + +/// Element Insert Element +/// +/// Set element imm4 in the result vector 'a' (sixteen signed 8-bit integer numbers) to the value +/// of element 0 in vector 'c' (sixteen signed 8-bit integer numbers). +/// All other elements in vector 'a' are unchanged. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insve.b, imm4 = 0b1111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insve_b(a: i8x16, imm4: i32, c: i8x16) -> i8x16 { + macro_rules! call { + ($imm4:expr) => { + msa_insve_b(a, $imm4, c) + }; + } + constify_imm4!(imm4, call) +} + +/// Element Insert Element +/// +/// Set element imm3 in the result vector 'a' (eight signed 16-bit integer numbers) to the value +/// of element 0 in vector 'c' (eight signed 16-bit integer numbers). +/// All other elements in vector 'a' are unchanged. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insve.h, imm3 = 0b111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insve_h(a: i16x8, imm3: i32, c: i16x8) -> i16x8 { + macro_rules! call { + ($imm3:expr) => { + msa_insve_h(a, $imm3, c) + }; + } + constify_imm3!(imm3, call) +} + +/// Element Insert Element +/// +/// Set element imm2 in the result vector 'a' (four signed 32-bit integer numbers) to the value +/// of element 0 in vector 'c' (four signed 32-bit integer numbers). +/// All other elements in vector 'a' are unchanged. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insve.w, imm2 = 0b11))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insve_w(a: i32x4, imm2: i32, c: i32x4) -> i32x4 { + macro_rules! call { + ($imm2:expr) => { + msa_insve_w(a, $imm2, c) + }; + } + constify_imm2!(imm2, call) +} + +/// Element Insert Element +/// +/// Set element imm1 in the result vector 'a' (two signed 64-bit integer numbers) to the value +/// of element 0 in vector 'c' (two signed 64-bit integer numbers). +/// All other elements in vector 'a' are unchanged. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(insve.d, imm1 = 0b1))] +#[rustc_args_required_const(1)] +unsafe fn __msa_insve_d(a: i64x2, imm1: i32, c: i64x2) -> i64x2 { + macro_rules! call { + ($imm1:expr) => { + msa_insve_d(a, $imm1, c) + }; + } + constify_imm1!(imm1, call) +} + +/// Vector Load +/// +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 10-bit signed immediate offset imm_s10 are fetched and placed in +/// the vector (sixteen signed 8-bit integer numbers) value. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ld.b, imm_s10 = 0b1111111111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_ld_b(mem_addr: *mut i8, imm_s10: i32) -> i8x16 { + macro_rules! call { + ($imm_s10:expr) => { + msa_ld_b(mem_addr, $imm_s10) + }; + } + constify_imm_s10!(imm_s10, call) +} +
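+// NOTE: an illustrative sketch (hypothetical, not added to the tests): the
+// offset passed to a load must be a compile-time constant, which is what
+// `#[rustc_args_required_const]` and the `constify_*` macros enforce.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn ld_b_example(buf: &[i8; 32]) -> i8x16 {
+    // Loads the 16 bytes starting at buf + 16.
+    __msa_ld_b(buf.as_ptr() as *mut i8, 16)
+}
+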
+/// Vector Load +/// +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 11-bit signed immediate offset imm_s11 are fetched and placed in +/// the vector (eight signed 16-bit integer numbers) value. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ld.h, imm_s11 = 0b11111111111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_ld_h(mem_addr: *mut i8, imm_s11: i32) -> i16x8 { + macro_rules! call { + ($imm_s11:expr) => { + msa_ld_h(mem_addr, $imm_s11) + }; + } + constify_imm_s11!(imm_s11, call) +} + +/// Vector Load +/// +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 12-bit signed immediate offset imm_s12 are fetched and placed in +/// the vector (four signed 32-bit integer numbers) value. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ld.w, imm_s12 = 0b111111111111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_ld_w(mem_addr: *mut i8, imm_s12: i32) -> i32x4 { + macro_rules! call { + ($imm_s12:expr) => { + msa_ld_w(mem_addr, $imm_s12) + }; + } + constify_imm_s12!(imm_s12, call) +} + +/// Vector Load +/// +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 13-bit signed immediate offset imm_s13 are fetched and placed in +/// the vector (two signed 64-bit integer numbers) value. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ld.d, imm_s13 = 0b1111111111111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_ld_d(mem_addr: *mut i8, imm_s13: i32) -> i64x2 { + macro_rules! call { + ($imm_s13:expr) => { + msa_ld_d(mem_addr, $imm_s13) + }; + } + constify_imm_s13!(imm_s13, call) +} + +/// Immediate Load +/// +/// The signed immediate imm_s10 is replicated in all vector +/// (sixteen signed 8-bit integer numbers) elements. For byte elements, +/// only the least significant 8 bits of imm_s10 will be used. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))] +#[rustc_args_required_const(0)] +unsafe fn __msa_ldi_b(imm_s10: i32) -> i8x16 { + macro_rules! call { + ($imm_s10:expr) => { + msa_ldi_b($imm_s10) + }; + } + constify_imm_s10!(imm_s10, call) +} + +/// Immediate Load +/// +/// The signed immediate imm_s10 is replicated in all vector +/// (eight signed 16-bit integer numbers) elements. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))] +#[rustc_args_required_const(0)] +unsafe fn __msa_ldi_h(imm_s10: i32) -> i16x8 { + macro_rules! call { + ($imm_s10:expr) => { + msa_ldi_h($imm_s10) + }; + } + constify_imm_s10!(imm_s10, call) +} + +/// Immediate Load +/// +/// The signed immediate imm_s10 is replicated in all vector +/// (four signed 32-bit integer numbers) elements. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))] +#[rustc_args_required_const(0)] +unsafe fn __msa_ldi_w(imm_s10: i32) -> i32x4 { + macro_rules! call { + ($imm_s10:expr) => { + msa_ldi_w($imm_s10) + }; + } + constify_imm_s10!(imm_s10, call) +} + +/// Immediate Load +/// +/// The signed immediate imm_s10 is replicated in all vector +/// (two signed 64-bit integer numbers) elements. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))] +#[rustc_args_required_const(0)] +unsafe fn __msa_ldi_d(imm_s10: i32) -> i64x2 { + macro_rules! call { + ($imm_s10:expr) => { + msa_ldi_d($imm_s10) + }; + } + constify_imm_s10!(imm_s10, call) +} +
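+// NOTE: an illustrative sketch (hypothetical, not added to the tests): ldi
+// splats a constant immediate into every lane.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn ldi_b_example() -> i8x16 {
+    // Every i8 lane becomes -20; the immediate must be a compile-time
+    // constant in the 10-bit signed range.
+    __msa_ldi_b(-20)
+}
+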
+/// Vector Fixed-Point Multiply and Add +/// +/// The products of fixed-point elements in 'b' (eight signed 16-bit integer numbers) +/// by fixed-point elements in vector 'c' (eight signed 16-bit integer numbers) +/// are added to the fixed-point elements in vector 'a' (eight signed 16-bit integer numbers). +/// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. +/// The saturated fixed-point results are stored to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(madd_q.h))] +unsafe fn __msa_madd_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { + msa_madd_q_h(a, b, c) +} + +/// Vector Fixed-Point Multiply and Add +/// +/// The products of fixed-point elements in 'b' (four signed 32-bit integer numbers) +/// by fixed-point elements in vector 'c' (four signed 32-bit integer numbers) +/// are added to the fixed-point elements in vector 'a' (four signed 32-bit integer numbers). +/// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. +/// The saturated fixed-point results are stored to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(madd_q.w))] +unsafe fn __msa_madd_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { + msa_madd_q_w(a, b, c) +} + +/// Vector Fixed-Point Multiply and Add Rounded +/// +/// The products of fixed-point elements in 'b' (eight signed 16-bit integer numbers) +/// by fixed-point elements in vector 'c' (eight signed 16-bit integer numbers) +/// are added to the fixed-point elements in vector 'a' (eight signed 16-bit integer numbers). +/// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. +/// The rounded and saturated fixed-point results are stored to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maddr_q.h))] +unsafe fn __msa_maddr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { + msa_maddr_q_h(a, b, c) +} + +/// Vector Fixed-Point Multiply and Add Rounded +/// +/// The products of fixed-point elements in 'b' (four signed 32-bit integer numbers) +/// by fixed-point elements in vector 'c' (four signed 32-bit integer numbers) +/// are added to the fixed-point elements in vector 'a' (four signed 32-bit integer numbers). +/// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. +/// The rounded and saturated fixed-point results are stored to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maddr_q.w))] +unsafe fn __msa_maddr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { + msa_maddr_q_w(a, b, c) +} +
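+// NOTE: an illustrative sketch (hypothetical, not added to the tests) of the
+// Q15 fixed-point multiply-accumulate: 0x4000 encodes 0.5, and
+// 0.5 * 0.5 = 0.25 (0x2000) is accumulated into 'a'.
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn madd_q_h_example() -> i16x8 {
+    let a = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
+    let b = i16x8::new(0x4000, 0x4000, 0x4000, 0x4000, 0x4000, 0x4000, 0x4000, 0x4000);
+    // Every lane becomes 0x2000 (8192).
+    __msa_madd_q_h(a, b, b)
+}
+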
+/// Vector Multiply and Add +/// +/// The integer elements in vector 'b' (sixteen signed 8-bit integer numbers) +/// are multiplied by integer elements in vector 'c' (sixteen signed 8-bit integer numbers) +/// and added to the integer elements in vector 'a' (sixteen signed 8-bit integer numbers). +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maddv.b))] +unsafe fn __msa_maddv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 { + msa_maddv_b(a, b, c) +} + +/// Vector Multiply and Add +/// +/// The integer elements in vector 'b' (eight signed 16-bit integer numbers) +/// are multiplied by integer elements in vector 'c' (eight signed 16-bit integer numbers) +/// and added to the integer elements in vector 'a' (eight signed 16-bit integer numbers). +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maddv.h))] +unsafe fn __msa_maddv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { + msa_maddv_h(a, b, c) +} + +/// Vector Multiply and Add +/// +/// The integer elements in vector 'b' (four signed 32-bit integer numbers) +/// are multiplied by integer elements in vector 'c' (four signed 32-bit integer numbers) +/// and added to the integer elements in vector 'a' (four signed 32-bit integer numbers). +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maddv.w))] +unsafe fn __msa_maddv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { + msa_maddv_w(a, b, c) +} + +/// Vector Multiply and Add +/// +/// The integer elements in vector 'b' (two signed 64-bit integer numbers) +/// are multiplied by integer elements in vector 'c' (two signed 64-bit integer numbers) +/// and added to the integer elements in vector 'a' (two signed 64-bit integer numbers). +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maddv.d))] +unsafe fn __msa_maddv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 { + msa_maddv_d(a, b, c) +} + +/// Vector Maximum Based on Absolute Values +/// +/// The values with the largest magnitude, i.e. absolute value, between corresponding +/// signed elements in vector 'a' (sixteen signed 8-bit integer numbers) and +/// 'b' (sixteen signed 8-bit integer numbers) are written to vector +/// (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_a.b))] +unsafe fn __msa_max_a_b(a: i8x16, b: i8x16) -> i8x16 { + msa_max_a_b(a, b) +} + +/// Vector Maximum Based on Absolute Values +/// +/// The values with the largest magnitude, i.e. absolute value, between corresponding +/// signed elements in vector 'a' (eight signed 16-bit integer numbers) and +/// 'b' (eight signed 16-bit integer numbers) are written to vector +/// (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_a.h))] +unsafe fn __msa_max_a_h(a: i16x8, b: i16x8) -> i16x8 { + msa_max_a_h(a, b) +} + +/// Vector Maximum Based on Absolute Values +/// +/// The values with the largest magnitude, i.e. absolute value, between corresponding +/// signed elements in vector 'a' (four signed 32-bit integer numbers) and +/// 'b' (four signed 32-bit integer numbers) are written to vector +/// (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_a.w))] +unsafe fn __msa_max_a_w(a: i32x4, b: i32x4) -> i32x4 { + msa_max_a_w(a, b) +} + +/// Vector Maximum Based on Absolute Values +/// +/// The values with the largest magnitude, i.e.
absolute value, between corresponding +/// signed elements in vector 'a'(two signed 64-bit integer numbers) and +/// 'b'(two signed 64-bit integer numbers) are written to vector +/// (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_a.d))] +unsafe fn __msa_max_a_d(a: i64x2, b: i64x2) -> i64x2 { + msa_max_a_d(a, b) +} + +/// Vector Signed Maximum +/// +/// Maximum values between signed elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// and signed elements in vector 'b'(sixteen signed 8-bit integer numbers) are written to vector +/// (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_s.b))] +unsafe fn __msa_max_s_b(a: i8x16, b: i8x16) -> i8x16 { + msa_max_s_b(a, b) +} + +/// Vector Signed Maximum +/// +/// Maximum values between signed elements in vector 'a'(eight signed 16-bit integer numbers) +/// and signed elements in vector 'b'(eight signed 16-bit integer numbers) are written to vector +/// (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_s.h))] +unsafe fn __msa_max_s_h(a: i16x8, b: i16x8) -> i16x8 { + msa_max_s_h(a, b) +} + +/// Vector Signed Maximum +/// +/// Maximum values between signed elements in vector 'a'(four signed 32-bit integer numbers) +/// and signed elements in vector 'b'(four signed 32-bit integer numbers) are written to vector +/// (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_s.w))] +unsafe fn __msa_max_s_w(a: i32x4, b: i32x4) -> i32x4 { + msa_max_s_w(a, b) +} + +/// Vector Signed Maximum +/// +/// Maximum values between signed elements in vector 'a'(two signed 64-bit integer numbers) +/// and signed elements in vector 'b'(two signed 64-bit integer numbers) are written to vector +/// (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_s.d))] +unsafe fn __msa_max_s_d(a: i64x2, b: i64x2) -> i64x2 { + msa_max_s_d(a, b) +} + +/// Vector Unsigned Maximum +/// +/// Maximum values between unsigned elements in vector 'a'(sixteen unsigned 8-bit integer numbers) +/// and unsigned elements in vector 'b'(sixteen unsigned 8-bit integer numbers) are written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_u.b))] +unsafe fn __msa_max_u_b(a: u8x16, b: u8x16) -> u8x16 { + msa_max_u_b(a, b) +} + +/// Vector Unsigned Maximum +/// +/// Maximum values between unsigned elements in vector 'a'(eight unsigned 16-bit integer numbers) +/// and unsigned elements in vector 'b'(eight unsigned 16-bit integer numbers) are written to vector +/// (eight unsigned 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_u.h))] +unsafe fn __msa_max_u_h(a: u16x8, b: u16x8) -> u16x8 { + msa_max_u_h(a, b) +} + +/// Vector Unsigned Maximum +/// +/// Maximum values between unsigned elements in vector 'a'(four unsigned 32-bit integer numbers) +/// and unsigned elements in vector 'b'(four unsigned 32-bit integer numbers) are written to vector +/// (four unsigned 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_u.w))] +unsafe fn __msa_max_u_w(a: u32x4, b: u32x4) -> u32x4 { + msa_max_u_w(a, b) +} + +/// Vector Unsigned Maximum +/// 
+/// Maximum values between unsigned elements in vector 'a' (two unsigned 64-bit integer numbers) +/// and unsigned elements in vector 'b' (two unsigned 64-bit integer numbers) are written to vector +/// (two unsigned 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(max_u.d))] +unsafe fn __msa_max_u_d(a: u64x2, b: u64x2) -> u64x2 { + msa_max_u_d(a, b) +} + +/// Immediate Signed Maximum +/// +/// Maximum values between signed elements in vector 'a' (sixteen signed 8-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_s.b, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_s_b(a: i8x16, imm_s5: i32) -> i8x16 { + macro_rules! call { + ($imm_s5:expr) => { + msa_maxi_s_b(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Signed Maximum +/// +/// Maximum values between signed elements in vector 'a' (eight signed 16-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_s.h, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_s_h(a: i16x8, imm_s5: i32) -> i16x8 { + macro_rules! call { + ($imm_s5:expr) => { + msa_maxi_s_h(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Signed Maximum +/// +/// Maximum values between signed elements in vector 'a' (four signed 32-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_s.w, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_s_w(a: i32x4, imm_s5: i32) -> i32x4 { + macro_rules! call { + ($imm_s5:expr) => { + msa_maxi_s_w(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Signed Maximum +/// +/// Maximum values between signed elements in vector 'a' (two signed 64-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_s.d, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_s_d(a: i64x2, imm_s5: i32) -> i64x2 { + macro_rules! call { + ($imm_s5:expr) => { + msa_maxi_s_d(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} +
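+// NOTE: an illustrative sketch (hypothetical, not added to the tests): the
+// immediate forms take a constant in the 5-bit signed range [-16, 15].
+#[cfg(test)]
+#[allow(dead_code)]
+unsafe fn maxi_s_b_example() -> i8x16 {
+    let a = i8x16::new(-8, -3, 2, 7, -8, -3, 2, 7, -8, -3, 2, 7, -8, -3, 2, 7);
+    // Each lane becomes max(lane, -1): (-1, -1, 2, 7, ...)
+    __msa_maxi_s_b(a, -1)
+}
+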
call { + ($imm5:expr) => { + msa_maxi_u_b(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Immediate Unsigned Maximum +/// +/// Maximum values between unsigned elements in vector 'a'(eight unsigned 16-bit integer numbers) +/// and the 5-bit unsigned immediate imm5 are written to vector +/// (eight unsigned 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_u.h, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_u_h(a: u16x8, imm5: i32) -> u16x8 { + macro_rules! call { + ($imm5:expr) => { + msa_maxi_u_h(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Immediate Unsigned Maximum +/// +/// Maximum values between unsigned elements in vector 'a'(four unsigned 32-bit integer numbers) +/// and the 5-bit unsigned immediate imm5 are written to vector +/// (four unsigned 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_u.w, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_u_w(a: u32x4, imm5: i32) -> u32x4 { + macro_rules! call { + ($imm5:expr) => { + msa_maxi_u_w(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Immediate Unsigned Maximum +/// +/// Maximum values between unsigned elements in vector 'a'(two unsigned 64-bit integer numbers) +/// and the 5-bit unsigned immediate imm5 are written to vector +/// (two unsigned 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(maxi_u.d, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_maxi_u_d(a: u64x2, imm5: i32) -> u64x2 { + macro_rules! call { + ($imm5:expr) => { + msa_maxi_u_d(a, $imm5) + }; + } + constify_imm5!(imm5, call) +} + +/// Vector Minimum Based on Absolute Value +/// +/// The value with the smallest magnitude, i.e. absolute value, between corresponding +/// signed elements in vector 'a'(sixteen signed 8-bit integer numbers) and +/// 'b'(sixteen signed 8-bit integer numbers) are written to vector +/// (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_a.b))] +unsafe fn __msa_min_a_b(a: i8x16, b: i8x16) -> i8x16 { + msa_min_a_b(a, b) +} + +/// Vector Minimum Based on Absolute Value +/// +/// The value with the smallest magnitude, i.e. absolute value, between corresponding +/// signed elements in vector 'a'(eight signed 16-bit integer numbers) and +/// 'b'(eight signed 16-bit integer numbers) are written to vector +/// (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_a.h))] +unsafe fn __msa_min_a_h(a: i16x8, b: i16x8) -> i16x8 { + msa_min_a_h(a, b) +} + +/// Vector Minimum Based on Absolute Value +/// +/// The value with the smallest magnitude, i.e. absolute value, between corresponding +/// signed elements in vector 'a'(four signed 32-bit integer numbers) and +/// 'b'(four signed 32-bit integer numbers) are written to vector +/// (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_a.w))] +unsafe fn __msa_min_a_w(a: i32x4, b: i32x4) -> i32x4 { + msa_min_a_w(a, b) +} + +/// Vector Minimum Based on Absolute Value +/// +/// The value with the smallest magnitude, i.e. 
absolute value, between corresponding +/// signed elements in vector 'a'(two signed 64-bit integer numbers) and +/// 'b'(two signed 64-bit integer numbers) are written to vector +/// (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_a.d))] +unsafe fn __msa_min_a_d(a: i64x2, b: i64x2) -> i64x2 { + msa_min_a_d(a, b) +} + +/// Vector Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// and signed elements in vector 'b'(sixteen signed 8-bit integer numbers) are written to vector +/// (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_s.b))] +unsafe fn __msa_min_s_b(a: i8x16, b: i8x16) -> i8x16 { + msa_min_s_b(a, b) +} + +/// Vector Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(eight signed 16-bit integer numbers) +/// and signed elements in vector 'b'(eight signed 16-bit integer numbers) are written to vector +/// (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_s.h))] +unsafe fn __msa_min_s_h(a: i16x8, b: i16x8) -> i16x8 { + msa_min_s_h(a, b) +} + +/// Vector Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(four signed 32-bit integer numbers) +/// and signed elements in vector 'b'(four signed 32-bit integer numbers) are written to vector +/// (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_s.w))] +unsafe fn __msa_min_s_w(a: i32x4, b: i32x4) -> i32x4 { + msa_min_s_w(a, b) +} + +/// Vector Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(two signed 64-bit integer numbers) +/// and signed elements in vector 'b'(two signed 64-bit integer numbers) are written to vector +/// (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_s.d))] +unsafe fn __msa_min_s_d(a: i64x2, b: i64x2) -> i64x2 { + msa_min_s_d(a, b) +} + +/// Immediate Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mini_s.b, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_mini_s_b(a: i8x16, imm_s5: i32) -> i8x16 { + macro_rules! call { + ($imm_s5:expr) => { + msa_mini_s_b(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(eight signed 16-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mini_s.h, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_mini_s_h(a: i16x8, imm_s5: i32) -> i16x8 { + macro_rules! 
call { + ($imm_s5:expr) => { + msa_mini_s_h(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(four signed 32-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mini_s.w, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_mini_s_w(a: i32x4, imm_s5: i32) -> i32x4 { + macro_rules! call { + ($imm_s5:expr) => { + msa_mini_s_w(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Immediate Signed Minimum +/// +/// Minimum values between signed elements in vector 'a'(two signed 64-bit integer numbers) +/// and the 5-bit signed immediate imm_s5 are written to vector +/// (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mini_s.d, imm_s5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_mini_s_d(a: i64x2, imm_s5: i32) -> i64x2 { + macro_rules! call { + ($imm_s5:expr) => { + msa_mini_s_d(a, $imm_s5) + }; + } + constify_imm_s5!(imm_s5, call) +} + +/// Vector Unsigned Minimum +/// +/// Minimum values between unsigned elements in vector 'a'(sixteen unsigned 8-bit integer numbers) +/// and unsigned elements in vector 'b'(sixteen unsigned 8-bit integer numbers) are written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_u.b))] +unsafe fn __msa_min_u_b(a: u8x16, b: u8x16) -> u8x16 { + msa_min_u_b(a, b) +} + +/// Vector Unsigned Minimum +/// +/// Minimum values between unsigned elements in vector 'a'(eight unsigned 16-bit integer numbers) +/// and unsigned elements in vector 'b'(eight unsigned 16-bit integer numbers) are written to vector +/// (eight unsigned 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_u.h))] +unsafe fn __msa_min_u_h(a: u16x8, b: u16x8) -> u16x8 { + msa_min_u_h(a, b) +} + +/// Vector Unsigned Minimum +/// +/// Minimum values between unsigned elements in vector 'a'(four unsigned 32-bit integer numbers) +/// and unsigned elements in vector 'b'(four unsigned 32-bit integer numbers) are written to vector +/// (four unsigned 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_u.w))] +unsafe fn __msa_min_u_w(a: u32x4, b: u32x4) -> u32x4 { + msa_min_u_w(a, b) +} + +/// Vector Unsigned Minimum +/// +/// Minimum values between unsigned elements in vector 'a'(two unsigned 64-bit integer numbers) +/// and unsigned elements in vector 'b'(two unsigned 64-bit integer numbers) are written to vector +/// (two unsigned 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(min_u.d))] +unsafe fn __msa_min_u_d(a: u64x2, b: u64x2) -> u64x2 { + msa_min_u_d(a, b) +} + +/// Immediate Unsigned Minimum +/// +/// Minimum values between unsigned elements in vector 'a'(sixteen unsigned 8-bit integer numbers) +/// and the 5-bit unsigned immediate imm5 are written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mini_u.b, imm5 = 0b11111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_mini_u_b(a: u8x16, imm5: i32) -> u8x16 { + macro_rules! 
call {
+        ($imm5:expr) => {
+            msa_mini_u_b(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Unsigned Minimum
+///
+/// Minimum values between unsigned elements in vector 'a'(eight unsigned 16-bit integer numbers)
+/// and the 5-bit unsigned immediate imm5 are written to vector
+/// (eight unsigned 16-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mini_u.h, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_mini_u_h(a: u16x8, imm5: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_mini_u_h(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Unsigned Minimum
+///
+/// Minimum values between unsigned elements in vector 'a'(four unsigned 32-bit integer numbers)
+/// and the 5-bit unsigned immediate imm5 are written to vector
+/// (four unsigned 32-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mini_u.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_mini_u_w(a: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_mini_u_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Unsigned Minimum
+///
+/// Minimum values between unsigned elements in vector 'a'(two unsigned 64-bit integer numbers)
+/// and the 5-bit unsigned immediate imm5 are written to vector
+/// (two unsigned 64-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mini_u.d, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_mini_u_d(a: u64x2, imm5: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_mini_u_d(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Vector Signed Modulo
+///
+/// The signed integer elements in vector 'a'(sixteen signed 8-bit integer numbers)
+/// are divided by signed integer elements in vector 'b'(sixteen signed 8-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (sixteen signed 8-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_s.b))]
+unsafe fn __msa_mod_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_mod_s_b(a, b)
+}
+
+/// Vector Signed Modulo
+///
+/// The signed integer elements in vector 'a'(eight signed 16-bit integer numbers)
+/// are divided by signed integer elements in vector 'b'(eight signed 16-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (eight signed 16-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_s.h))]
+unsafe fn __msa_mod_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_mod_s_h(a, b)
+}
+
+/// Vector Signed Modulo
+///
+/// The signed integer elements in vector 'a'(four signed 32-bit integer numbers)
+/// are divided by signed integer elements in vector 'b'(four signed 32-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (four signed 32-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
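+///
+/// A usage sketch with hypothetical lane values (the expected results follow
+/// from the description above):
+///
+/// ```ignore
+/// let a = i32x4::new(1, -2, 3, -4);
+/// let b = i32x4::new(4, 3, -2, -1);
+/// // each remainder keeps the sign of the dividend in 'a'
+/// let r = __msa_mod_s_w(a, b); // i32x4::new(1, -2, 1, 0)
+/// ```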
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_s.w))]
+unsafe fn __msa_mod_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_mod_s_w(a, b)
+}
+
+/// Vector Signed Modulo
+///
+/// The signed integer elements in vector 'a'(two signed 64-bit integer numbers)
+/// are divided by signed integer elements in vector 'b'(two signed 64-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (two signed 64-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_s.d))]
+unsafe fn __msa_mod_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_mod_s_d(a, b)
+}
+
+/// Vector Unsigned Modulo
+///
+/// The unsigned integer elements in vector 'a'(sixteen unsigned 8-bit integer numbers)
+/// are divided by unsigned integer elements in vector 'b'(sixteen unsigned 8-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (sixteen unsigned 8-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_u.b))]
+unsafe fn __msa_mod_u_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_mod_u_b(a, b)
+}
+
+/// Vector Unsigned Modulo
+///
+/// The unsigned integer elements in vector 'a'(eight unsigned 16-bit integer numbers)
+/// are divided by unsigned integer elements in vector 'b'(eight unsigned 16-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (eight unsigned 16-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_u.h))]
+unsafe fn __msa_mod_u_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_mod_u_h(a, b)
+}
+
+/// Vector Unsigned Modulo
+///
+/// The unsigned integer elements in vector 'a'(four unsigned 32-bit integer numbers)
+/// are divided by unsigned integer elements in vector 'b'(four unsigned 32-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (four unsigned 32-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_u.w))]
+unsafe fn __msa_mod_u_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_mod_u_w(a, b)
+}
+
+/// Vector Unsigned Modulo
+///
+/// The unsigned integer elements in vector 'a'(two unsigned 64-bit integer numbers)
+/// are divided by unsigned integer elements in vector 'b'(two unsigned 64-bit integer numbers).
+/// The remainder of the same sign as the dividend is written to vector
+/// (two unsigned 64-bit integer numbers). If a divisor element in vector 'b' is zero,
+/// the result value is UNPREDICTABLE.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(mod_u.d))]
+unsafe fn __msa_mod_u_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_mod_u_d(a, b)
+}
+
+/// Vector Move
+///
+/// Copy all WRLEN bits in vector 'a'(sixteen signed 8-bit integer numbers)
+/// to vector (sixteen signed 8-bit integer numbers).
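+///
+/// A minimal sketch with hypothetical values:
+///
+/// ```ignore
+/// let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+/// let r = __msa_move_v(a); // a bit-for-bit copy of 'a'
+/// ```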
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(move.v))] +unsafe fn __msa_move_v(a: i8x16) -> i8x16 { + msa_move_v(a) +} + +/// Vector Fixed-Point Multiply and Subtract +/// +/// The product of fixed-point elements in vector 'c'(eight signed 16-bit integer numbers) +/// by fixed-point elements in vector 'b'(eight signed 16-bit integer numbers) +/// are subtracted from the fixed-point elements in vector 'a' +/// (eight signed 16-bit integer numbers).The multiplication result is not saturated, +/// i.e. exact (-1) * (-1) = 1 is subtracted from the destination. +/// The saturated fixed-point results are stored back to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msub_q.h))] +unsafe fn __msa_msub_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { + msa_msub_q_h(a, b, c) +} + +/// Vector Fixed-Point Multiply and Subtract +/// +/// The product of fixed-point elements in vector 'c'(four signed 32-bit integer numbers) +/// by fixed-point elements in vector 'b'(four signed 32-bit integer numbers) +/// are subtracted from the fixed-point elements in vector 'a' +/// (four signed 32-bit integer numbers).The multiplication result is not saturated, +/// i.e. exact (-1) * (-1) = 1 is subtracted from the destination. +/// The saturated fixed-point results are stored back to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msub_q.w))] +unsafe fn __msa_msub_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { + msa_msub_q_w(a, b, c) +} + +/// Vector Fixed-Point Multiply and Subtract Rounded +/// +/// The product of fixed-point elements in vector 'c'(eight signed 16-bit integer numbers) +/// by fixed-point elements in vector 'b'(eight signed 16-bit integer numbers) +/// are subtracted from the fixed-point elements in vector 'a' +/// (eight signed 16-bit integer numbers).The multiplication result is not saturated, +/// i.e. exact (-1) * (-1) = 1 is subtracted from the destination. +/// The rounded and saturated fixed-point results are stored back to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msubr_q.h))] +unsafe fn __msa_msubr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { + msa_msubr_q_h(a, b, c) +} + +/// Vector Fixed-Point Multiply and Subtract Rounded +/// +/// The product of fixed-point elements in vector 'c'(four signed 32-bit integer numbers) +/// by fixed-point elements in vector 'b'(four signed 32-bit integer numbers) +/// are subtracted from the fixed-point elements in vector 'a' +/// (four signed 32-bit integer numbers).The multiplication result is not saturated, +/// i.e. exact (-1) * (-1) = 1 is subtracted from the destination. +/// The rounded and saturated fixed-point results are stored back to vector 'a' +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msubr_q.w))] +unsafe fn __msa_msubr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { + msa_msubr_q_w(a, b, c) +} + +/// Vector Multiply and Subtract +/// +/// The integer elements in vector 'c'(sixteen signed 8-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(sixteen signed 8-bit integer numbers) +/// and subtracted from the integer elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// The most significant half of the multiplication result is discarded. 
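+///
+/// A usage sketch with hypothetical lane values (every lane computes
+/// a - b * c, keeping only the least significant half of the product):
+///
+/// ```ignore
+/// let a = i8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10);
+/// let b = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
+/// let c = i8x16::new(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
+/// let r = __msa_msubv_b(a, b, c); // every lane is 10 - 2 * 3 = 4
+/// ```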
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msubv.b))] +unsafe fn __msa_msubv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 { + msa_msubv_b(a, b, c) +} + +/// Vector Multiply and Subtract +/// +/// The integer elements in vector 'c'(eight signed 16-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(eight signed 16-bit integer numbers) +/// and subtracted from the integer elements in vector 'a'(eight signed 16-bit integer numbers) +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msubv.h))] +unsafe fn __msa_msubv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { + msa_msubv_h(a, b, c) +} + +/// Vector Multiply and Subtract +/// +/// The integer elements in vector 'c'(four signed 32-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(four signed 32-bit integer numbers) +/// and subtracted from the integer elements in vector 'a'(four signed 32-bit integer numbers) +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msubv.w))] +unsafe fn __msa_msubv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { + msa_msubv_w(a, b, c) +} + +/// Vector Multiply and Subtract +/// +/// The integer elements in vector 'c'(two signed 64-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(two signed 64-bit integer numbers) +/// and subtracted from the integer elements in vector 'a'(two signed 64-bit integer numbers) +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(msubv.d))] +unsafe fn __msa_msubv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 { + msa_msubv_d(a, b, c) +} + +/// Vector Fixed-Point Multiply +/// +/// The fixed-point elements in vector 'a'(eight signed 16-bit integer numbers) +/// multiplied by fixed-point elements in vector 'b'(eight signed 16-bit integer numbers) +/// The result is written to vector (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mul_q.h))] +unsafe fn __msa_mul_q_h(a: i16x8, b: i16x8) -> i16x8 { + msa_mul_q_h(a, b) +} + +/// Vector Fixed-Point Multiply +/// +/// The fixed-point elements in vector 'a'(four signed 32-bit integer numbers) +/// multiplied by fixed-point elements in vector 'b'(four signed 32-bit integer numbers) +/// The result is written to vector (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mul_q.w))] +unsafe fn __msa_mul_q_w(a: i32x4, b: i32x4) -> i32x4 { + msa_mul_q_w(a, b) +} + +/// Vector Fixed-Point Multiply Rounded +/// +/// The fixed-point elements in vector 'a'(eight signed 16-bit integer numbers) +/// multiplied by fixed-point elements in vector 'b'(eight signed 16-bit integer numbers) +/// The rounded result is written to vector (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mulr_q.h))] +unsafe fn __msa_mulr_q_h(a: i16x8, b: i16x8) -> i16x8 { + msa_mulr_q_h(a, b) +} + +/// Vector Fixed-Point Multiply Rounded +/// +/// The fixed-point elements in vector 'a'(four signed 32-bit integer numbers) +/// multiplied by fixed-point elements in vector 'b'(four signed 32-bit integer numbers) +/// The rounded result is written to vector (four signed 32-bit integer 
numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mulr_q.w))] +unsafe fn __msa_mulr_q_w(a: i32x4, b: i32x4) -> i32x4 { + msa_mulr_q_w(a, b) +} + +/// Vector Multiply +/// +/// The integer elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(sixteen signed 8-bit integer numbers) +/// The result is written to vector (sixteen signed 8-bit integer numbers) +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mulv.b))] +unsafe fn __msa_mulv_b(a: i8x16, b: i8x16) -> i8x16 { + msa_mulv_b(a, b) +} + +/// Vector Multiply +/// +/// The integer elements in vector 'a'(eight signed 16-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(eight signed 16-bit integer numbers) +/// The result is written to vector (eight signed 16-bit integer numbers) +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mulv.h))] +unsafe fn __msa_mulv_h(a: i16x8, b: i16x8) -> i16x8 { + msa_mulv_h(a, b) +} + +/// Vector Multiply +/// +/// The integer elements in vector 'a'(four signed 32-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(four signed 32-bit integer numbers) +/// The result is written to vector (four signed 32-bit integer numbers) +/// The most significant half of the multiplication result is discarded. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mulv.w))] +unsafe fn __msa_mulv_w(a: i32x4, b: i32x4) -> i32x4 { + msa_mulv_w(a, b) +} + +/// Vector Multiply +/// +/// The integer elements in vector 'a'(two signed 64-bit integer numbers) +/// are multiplied by integer elements in vector 'b'(two signed 64-bit integer numbers) +/// The result is written to vector (two signed 64-bit integer numbers) +/// The most significant half of the multiplication result is discarded. 
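+///
+/// A usage sketch with hypothetical lane values:
+///
+/// ```ignore
+/// let a = i64x2::new(3, -4);
+/// let b = i64x2::new(2, 5);
+/// let r = __msa_mulv_d(a, b); // i64x2::new(6, -20)
+/// ```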
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(mulv.d))] +unsafe fn __msa_mulv_d(a: i64x2, b: i64x2) -> i64x2 { + msa_mulv_d(a, b) +} + +/// Vector Leading Ones Count +/// +/// The number of leading ones for elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// is stored to the elements in vector (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nloc.b))] +unsafe fn __msa_nloc_b(a: i8x16) -> i8x16 { + msa_nloc_b(a) +} + +/// Vector Leading Ones Count +/// +/// The number of leading ones for elements in vector 'a'(eight signed 16-bit integer numbers) +/// is stored to the elements in vector (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nloc.h))] +unsafe fn __msa_nloc_h(a: i16x8) -> i16x8 { + msa_nloc_h(a) +} + +/// Vector Leading Ones Count +/// +/// The number of leading ones for elements in vector 'a'(four signed 32-bit integer numbers) +/// is stored to the elements in vector (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nloc.w))] +unsafe fn __msa_nloc_w(a: i32x4) -> i32x4 { + msa_nloc_w(a) +} + +/// Vector Leading Ones Count +/// +/// The number of leading ones for elements in vector 'a'(two signed 64-bit integer numbers) +/// is stored to the elements in vector (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nloc.d))] +unsafe fn __msa_nloc_d(a: i64x2) -> i64x2 { + msa_nloc_d(a) +} + +/// Vector Leading Zeros Count +/// +/// The number of leading zeros for elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// is stored to the elements in vector (sixteen signed 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nlzc.b))] +unsafe fn __msa_nlzc_b(a: i8x16) -> i8x16 { + msa_nlzc_b(a) +} + +/// Vector Leading Zeros Count +/// +/// The number of leading zeros for elements in vector 'a'(eight signed 16-bit integer numbers) +/// is stored to the elements in vector (eight signed 16-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nlzc.h))] +unsafe fn __msa_nlzc_h(a: i16x8) -> i16x8 { + msa_nlzc_h(a) +} + +/// Vector Leading Zeros Count +/// +/// The number of leading zeros for elements in vector 'a'(four signed 32-bit integer numbers) +/// is stored to the elements in vector (four signed 32-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nlzc.w))] +unsafe fn __msa_nlzc_w(a: i32x4) -> i32x4 { + msa_nlzc_w(a) +} + +/// Vector Leading Zeros Count +/// +/// The number of leading zeros for elements in vector 'a'(two signed 64-bit integer numbers) +/// is stored to the elements in vector (two signed 64-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nlzc.d))] +unsafe fn __msa_nlzc_d(a: i64x2) -> i64x2 { + msa_nlzc_d(a) +} + +/// Vector Logical Negated Or +/// +/// Each bit of vector 'a'(sixteen unsigned 8-bit integer numbers) +/// is combined with the corresponding bit of vector 'b' (sixteen unsigned 8-bit integer numbers) +/// in a bitwise logical NOR operation. 
The result is written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nor.v))] +unsafe fn __msa_nor_v(a: u8x16, b: u8x16) -> u8x16 { + msa_nor_v(a, b) +} + +/// Immediate Logical Negated Or +/// +/// Each bit of vector 'a'(sixteen unsigned 8-bit integer numbers) +/// is combined with the 8-bit immediate imm8 +/// in a bitwise logical NOR operation. The result is written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(nori.b, imm8 = 0b11111111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_nori_b(a: u8x16, imm8: i32) -> u8x16 { + macro_rules! call { + ($imm8:expr) => { + msa_nori_b(a, $imm8) + }; + } + constify_imm8!(imm8, call) +} + +/// Vector Logical Or +/// +/// Each bit of vector 'a'(sixteen unsigned 8-bit integer numbers) +/// is combined with the corresponding bit of vector 'b' (sixteen unsigned 8-bit integer numbers) +/// in a bitwise logical OR operation. The result is written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(or.v))] +unsafe fn __msa_or_v(a: u8x16, b: u8x16) -> u8x16 { + msa_or_v(a, b) +} + +/// Immediate Logical Or +/// +/// Each bit of vector 'a'(sixteen unsigned 8-bit integer numbers) +/// is combined with the 8-bit immediate imm8 +/// in a bitwise logical OR operation. The result is written to vector +/// (sixteen unsigned 8-bit integer numbers) +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(ori.b, imm8 = 0b11111111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_ori_b(a: u8x16, imm8: i32) -> u8x16 { + macro_rules! call { + ($imm8:expr) => { + msa_ori_b(a, $imm8) + }; + } + constify_imm8!(imm8, call) +} + +/// Vector Pack Even +/// +/// Even elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// are copied to the left half of the result vector and even elements in vector 'b' +/// (sixteen signed 8-bit integer numbers) are copied to the right half of the result vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckev.b))] +unsafe fn __msa_pckev_b(a: i8x16, b: i8x16) -> i8x16 { + msa_pckev_b(a, b) +} + +/// Vector Pack Even +/// +/// Even elements in vectors 'a' (eight signed 16-bit integer numbers) +/// are copied to the left half of the result vector and even elements in vector 'b' +/// (eight signed 16-bit integer numbers) are copied to the right half of the result vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckev.h))] +unsafe fn __msa_pckev_h(a: i16x8, b: i16x8) -> i16x8 { + msa_pckev_h(a, b) +} + +/// Vector Pack Even +/// +/// Even elements in vectors 'a' (four signed 32-bit integer numbers) +/// are copied to the left half of the result vector and even elements in vector 'b' +/// (four signed 32-bit integer numbers) are copied to the right half of the result vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckev.w))] +unsafe fn __msa_pckev_w(a: i32x4, b: i32x4) -> i32x4 { + msa_pckev_w(a, b) +} + +/// Vector Pack Even +/// +/// Even elements in vectors 'a' (two signed 64-bit integer numbers) +/// are copied to the left half of the result vector and even elements in vector 'b' +/// (two signed 64-bit integer numbers) are copied to the right half of the result vector. 
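+///
+/// A sketch with hypothetical values (lane 0 is the rightmost element, so per
+/// the description above 'b' supplies lane 0 and 'a' supplies lane 1):
+///
+/// ```ignore
+/// let a = i64x2::new(1, 2);
+/// let b = i64x2::new(3, 4);
+/// let r = __msa_pckev_d(a, b); // i64x2::new(3, 1)
+/// ```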
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckev.d))] +unsafe fn __msa_pckev_d(a: i64x2, b: i64x2) -> i64x2 { + msa_pckev_d(a, b) +} + +/// Vector Pack Odd +/// +/// Odd elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// are copied to the left half of the result vector and odd elements in vector 'b' +/// (sixteen signed 8-bit integer numbers) are copied to the right half of the result vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckod.b))] +unsafe fn __msa_pckod_b(a: i8x16, b: i8x16) -> i8x16 { + msa_pckod_b(a, b) +} + +/// Vector Pack Odd +/// +/// Odd elements in vectors 'a' (eight signed 16-bit integer numbers) +/// are copied to the left half of the result vector and odd elements in vector 'b' +/// (eight signed 16-bit integer numbers) are copied to the right half of the result vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckod.h))] +unsafe fn __msa_pckod_h(a: i16x8, b: i16x8) -> i16x8 { + msa_pckod_h(a, b) +} + +/// Vector Pack Odd +/// +/// Odd elements in vectors 'a' (four signed 32-bit integer numbers) +/// are copied to the left half of the result vector and odd elements in vector 'b' +/// (four signed 32-bit integer numbers) are copied to the right half of the result vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(pckod.w))] +unsafe fn __msa_pckod_w(a: i32x4, b: i32x4) -> i32x4 { + msa_pckod_w(a, b) +} + +/// Vector Pack Odd +/// +/// Odd elements in vectors 'a' (two signed 64-bit integer numbers) +/// are copied to the left half of the result vector and odd elements in vector 'b' +/// (two signed 64-bit integer numbers) are copied to the right half of the result vector. 
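+///
+/// A sketch with hypothetical values (the odd, i.e. upper, elements of 'b'
+/// and 'a' land in lanes 0 and 1 respectively):
+///
+/// ```ignore
+/// let a = i64x2::new(1, 2);
+/// let b = i64x2::new(3, 4);
+/// let r = __msa_pckod_d(a, b); // i64x2::new(4, 2)
+/// ```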
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(pckod.d))]
+unsafe fn __msa_pckod_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_pckod_d(a, b)
+}
+
+/// Vector Population Count
+///
+/// The number of bits set to 1 for elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// is stored to the elements in the result vector (sixteen signed 8-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(pcnt.b))]
+unsafe fn __msa_pcnt_b(a: i8x16) -> i8x16 {
+    msa_pcnt_b(a)
+}
+
+/// Vector Population Count
+///
+/// The number of bits set to 1 for elements in vector 'a' (eight signed 16-bit integer numbers)
+/// is stored to the elements in the result vector (eight signed 16-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(pcnt.h))]
+unsafe fn __msa_pcnt_h(a: i16x8) -> i16x8 {
+    msa_pcnt_h(a)
+}
+
+/// Vector Population Count
+///
+/// The number of bits set to 1 for elements in vector 'a' (four signed 32-bit integer numbers)
+/// is stored to the elements in the result vector (four signed 32-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(pcnt.w))]
+unsafe fn __msa_pcnt_w(a: i32x4) -> i32x4 {
+    msa_pcnt_w(a)
+}
+
+/// Vector Population Count
+///
+/// The number of bits set to 1 for elements in vector 'a' (two signed 64-bit integer numbers)
+/// is stored to the elements in the result vector (two signed 64-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(pcnt.d))]
+unsafe fn __msa_pcnt_d(a: i64x2) -> i64x2 {
+    msa_pcnt_d(a)
+}
+
+/// Immediate Signed Saturate
+///
+/// Signed elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are saturated to signed values of imm3+1 bits without changing the data width.
+/// The result is stored in the vector (sixteen signed 8-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_s.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_s_b(a: i8x16, imm3: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_sat_s_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Signed Saturate
+///
+/// Signed elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are saturated to signed values of imm4+1 bits without changing the data width.
+/// The result is stored in the vector (eight signed 16-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_s.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_s_h(a: i16x8, imm4: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_sat_s_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Signed Saturate
+///
+/// Signed elements in vector 'a' (four signed 32-bit integer numbers)
+/// are saturated to signed values of imm5+1 bits without changing the data width.
+/// The result is stored in the vector (four signed 32-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_s.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_s_w(a: i32x4, imm5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_sat_s_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Signed Saturate
+///
+/// Signed elements in vector 'a' (two signed 64-bit integer numbers)
+/// are saturated to signed values of imm6+1 bits without changing the data width.
+/// The result is stored in the vector (two signed 64-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_s.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_s_d(a: i64x2, imm6: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_sat_s_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Immediate Unsigned Saturate
+///
+/// Unsigned elements in vector 'a' (sixteen unsigned 8-bit integer numbers)
+/// are saturated to unsigned values of imm3+1 bits without changing the data width.
+/// The result is stored in the vector (sixteen unsigned 8-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_u.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_u_b(a: u8x16, imm3: i32) -> u8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_sat_u_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Unsigned Saturate
+///
+/// Unsigned elements in vector 'a' (eight unsigned 16-bit integer numbers)
+/// are saturated to unsigned values of imm4+1 bits without changing the data width.
+/// The result is stored in the vector (eight unsigned 16-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_u.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_u_h(a: u16x8, imm4: i32) -> u16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_sat_u_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Unsigned Saturate
+///
+/// Unsigned elements in vector 'a' (four unsigned 32-bit integer numbers)
+/// are saturated to unsigned values of imm5+1 bits without changing the data width.
+/// The result is stored in the vector (four unsigned 32-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_u.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_u_w(a: u32x4, imm5: i32) -> u32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_sat_u_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Unsigned Saturate
+///
+/// Unsigned elements in vector 'a' (two unsigned 64-bit integer numbers)
+/// are saturated to unsigned values of imm6+1 bits without changing the data width.
+/// The result is stored in the vector (two unsigned 64-bit integer numbers)
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sat_u.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_sat_u_d(a: u64x2, imm6: i32) -> u64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_sat_u_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Immediate Set Shuffle Elements
+///
+/// The set shuffle instruction works on 4-element sets.
+/// All sets are shuffled in the same way: bits 2i+1..2i of the immediate imm8
+/// select the element of 'a' (sixteen signed 8-bit integer numbers) that is copied
+/// over the element i in the result vector
+/// (sixteen signed 8-bit integer numbers), where i is 0, 1, 2, 3.
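+///
+/// A usage sketch with hypothetical values; imm8 = 0b00011011 selects
+/// elements 3, 2, 1, 0, i.e. it reverses every 4-element set:
+///
+/// ```ignore
+/// let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+/// let r = __msa_shf_b(a, 0b00011011);
+/// // i8x16::new(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12)
+/// ```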
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(shf.b, imm8 = 0b11111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_shf_b(a: i8x16, imm8: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_shf_b(a, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// Immediate Set Shuffle Elements
+///
+/// The set shuffle instruction works on 4-element sets.
+/// All sets are shuffled in the same way: bits 2i+1..2i of the immediate imm8
+/// select the element of 'a' (eight signed 16-bit integer numbers) that is copied
+/// over the element i in the result vector
+/// (eight signed 16-bit integer numbers), where i is 0, 1, 2, 3.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(shf.h, imm8 = 0b11111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_shf_h(a: i16x8, imm8: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_shf_h(a, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// Immediate Set Shuffle Elements
+///
+/// The set shuffle instruction works on 4-element sets.
+/// All sets are shuffled in the same way: bits 2i+1..2i of the immediate imm8
+/// select the element of 'a' (four signed 32-bit integer numbers) that is copied
+/// over the element i in the result vector
+/// (four signed 32-bit integer numbers), where i is 0, 1, 2, 3.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(shf.w, imm8 = 0b11111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_shf_w(a: i32x4, imm8: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm8:expr) => {
+            msa_shf_w(a, $imm8)
+        };
+    }
+    constify_imm8!(imm8, call)
+}
+
+/// GPR Columns Slide
+///
+/// Vector registers 'a' (sixteen signed 8-bit integer numbers) and 'b'
+/// (sixteen signed 8-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by the number of columns given in GPR 'c'.
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+/// GPR 'c' value is interpreted modulo the number of columns in destination rectangle,
+/// or equivalently, the number of data format df elements in the destination vector.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sld.b))]
+unsafe fn __msa_sld_b(a: i8x16, b: i8x16, c: i32) -> i8x16 {
+    msa_sld_b(a, b, c)
+}
+
+/// GPR Columns Slide
+///
+/// Vector registers 'a' (eight signed 16-bit integer numbers) and 'b'
+/// (eight signed 16-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by the number of columns given in GPR 'c'.
+/// The result is written to vector (eight signed 16-bit integer numbers).
+/// GPR 'c' value is interpreted modulo the number of columns in destination rectangle,
+/// or equivalently, the number of data format df elements in the destination vector.
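+///
+/// A minimal sketch with hypothetical values; 'c' is interpreted modulo the
+/// element count (8 here), so a slide of 0 leaves the destination rectangle
+/// directly over 'b':
+///
+/// ```ignore
+/// let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
+/// let b = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
+/// let r = __msa_sld_h(a, b, 0); // equal to 'b'
+/// ```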
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sld.h))]
+unsafe fn __msa_sld_h(a: i16x8, b: i16x8, c: i32) -> i16x8 {
+    msa_sld_h(a, b, c)
+}
+
+/// GPR Columns Slide
+///
+/// Vector registers 'a' (four signed 32-bit integer numbers) and 'b'
+/// (four signed 32-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by the number of columns given in GPR 'c'.
+/// The result is written to vector (four signed 32-bit integer numbers).
+/// GPR 'c' value is interpreted modulo the number of columns in destination rectangle,
+/// or equivalently, the number of data format df elements in the destination vector.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sld.w))]
+unsafe fn __msa_sld_w(a: i32x4, b: i32x4, c: i32) -> i32x4 {
+    msa_sld_w(a, b, c)
+}
+
+/// GPR Columns Slide
+///
+/// Vector registers 'a' (two signed 64-bit integer numbers) and 'b'
+/// (two signed 64-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by the number of columns given in GPR 'c'.
+/// The result is written to vector (two signed 64-bit integer numbers).
+/// GPR 'c' value is interpreted modulo the number of columns in destination rectangle,
+/// or equivalently, the number of data format df elements in the destination vector.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sld.d))]
+unsafe fn __msa_sld_d(a: i64x2, b: i64x2, c: i32) -> i64x2 {
+    msa_sld_d(a, b, c)
+}
+
+/// Immediate Columns Slide
+///
+/// Vector registers 'a' (sixteen signed 8-bit integer numbers) and 'b'
+/// (sixteen signed 8-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by imm4 columns.
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_sldi_b(a: i8x16, b: i8x16, imm4: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_sldi_b(a, b, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Columns Slide
+///
+/// Vector registers 'a' (eight signed 16-bit integer numbers) and 'b'
+/// (eight signed 16-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by imm3 columns.
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_sldi_h(a: i16x8, b: i16x8, imm3: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_sldi_h(a, b, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Columns Slide
+///
+/// Vector registers 'a' (four signed 32-bit integer numbers) and 'b'
+/// (four signed 32-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by imm2 columns.
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_sldi_w(a: i32x4, b: i32x4, imm2: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm2:expr) => {
+            msa_sldi_w(a, b, $imm2)
+        };
+    }
+    constify_imm2!(imm2, call)
+}
+
+/// Immediate Columns Slide
+///
+/// Vector registers 'a' (two signed 64-bit integer numbers) and 'b'
+/// (two signed 64-bit integer numbers) contain 2-dimensional byte arrays (rectangles)
+/// stored row-wise with as many rows as bytes in integer data format df.
+/// The two source rectangles 'b' and 'a' are concatenated horizontally in the order
+/// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination
+/// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b'
+/// by imm1 columns.
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_sldi_d(a: i64x2, b: i64x2, imm1: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm1:expr) => {
+            msa_sldi_d(a, b, $imm1)
+        };
+    }
+    constify_imm1!(imm1, call)
+}
+
+/// Vector Shift Left
+///
+/// The elements in vector 'a'(sixteen signed 8-bit integer numbers)
+/// are shifted left by the number of bits the elements in vector 'b'
+/// (sixteen signed 8-bit integer numbers) specify modulo the size of the
+/// element in bits. The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sll.b))]
+unsafe fn __msa_sll_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_sll_b(a, b)
+}
+
+/// Vector Shift Left
+///
+/// The elements in vector 'a'(eight signed 16-bit integer numbers)
+/// are shifted left by the number of bits the elements in vector 'b'
+/// (eight signed 16-bit integer numbers) specify modulo the size of the
+/// element in bits. The result is written to vector (eight signed 16-bit integer numbers).
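+///
+/// A usage sketch with hypothetical lane values:
+///
+/// ```ignore
+/// let a = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
+/// let b = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
+/// let r = __msa_sll_h(a, b); // i16x8::new(1, 2, 4, 8, 16, 32, 64, 128)
+/// ```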
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(sll.h))] +unsafe fn __msa_sll_h(a: i16x8, b: i16x8) -> i16x8 { + msa_sll_h(a, b) +} + +/// Vector Shift Left +/// +/// The elements in vector 'a'(four signed 32-bit integer numbers) +/// are shifted left by the number of bits the elements in vector 'b' +/// (four signed 32-bit integer numbers) specify modulo the size of the +/// element in bits.The result is written to vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(sll.w))] +unsafe fn __msa_sll_w(a: i32x4, b: i32x4) -> i32x4 { + msa_sll_w(a, b) +} + +/// Vector Shift Left +/// +/// The elements in vector 'a'(two signed 64-bit integer numbers) +/// are shifted left by the number of bits the elements in vector 'b' +/// (two signed 64-bit integer numbers) specify modulo the size of the +/// element in bits.The result is written to vector(two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(sll.d))] +unsafe fn __msa_sll_d(a: i64x2, b: i64x2) -> i64x2 { + msa_sll_d(a, b) +} + +/// Immediate Shift Left +/// +/// The elements in vector 'a'(sixteen signed 8-bit integer numbers) +/// are shifted left by the imm4 bits. +/// The result is written to vector(sixteen signed 8-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(slli.b, imm4 = 0b1111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_slli_b(a: i8x16, imm4: i32) -> i8x16 { + macro_rules! call { + ($imm4:expr) => { + msa_slli_b(a, $imm4) + }; + } + constify_imm4!(imm4, call) +} + +/// Immediate Shift Left +/// +/// The elements in vector 'a'(eight signed 16-bit integer numbers) +/// are shifted left by the imm3 bits. +/// The result is written to vector(eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(slli.h, imm3 = 0b111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_slli_h(a: i16x8, imm3: i32) -> i16x8 { + macro_rules! call { + ($imm3:expr) => { + msa_slli_h(a, $imm3) + }; + } + constify_imm3!(imm3, call) +} + +/// Immediate Shift Left +/// +/// The elements in vector 'a'(four signed 32-bit integer numbers) +/// are shifted left by the imm2 bits. +/// The result is written to vector(four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(slli.w, imm2 = 0b11))] +#[rustc_args_required_const(1)] +unsafe fn __msa_slli_w(a: i32x4, imm2: i32) -> i32x4 { + macro_rules! call { + ($imm2:expr) => { + msa_slli_w(a, $imm2) + }; + } + constify_imm2!(imm2, call) +} + +/// Immediate Shift Left +/// +/// The elements in vector 'a'(two signed 64-bit integer numbers) +/// are shifted left by the imm1 bits. +/// The result is written to vector(two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(slli.d, imm1 = 0b1))] +#[rustc_args_required_const(1)] +unsafe fn __msa_slli_d(a: i64x2, imm1: i32) -> i64x2 { + macro_rules! call { + ($imm1:expr) => { + msa_slli_d(a, $imm1) + }; + } + constify_imm1!(imm1, call) +} + +/// GPR Element Splat +/// +/// Replicate vector 'a'(sixteen signed 8-bit integer numbers) +/// element with index given by GPR 'b' to all elements in vector +/// (sixteen signed 8-bit integer numbers) GPR 'b' value is interpreted +/// modulo the number of data format df elements in the destination vector. 
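+///
+/// A sketch with hypothetical values; the index is taken modulo 16, so 19
+/// selects element 3:
+///
+/// ```ignore
+/// let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+/// let r = __msa_splat_b(a, 19); // every lane is 3
+/// ```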
+/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splat.b))] +unsafe fn __msa_splat_b(a: i8x16, b: i32) -> i8x16 { + msa_splat_b(a, b) +} + +/// GPR Element Splat +/// +/// Replicate vector 'a'(eight signed 16-bit integer numbers) +/// element with index given by GPR 'b' to all elements in vector +/// (eight signed 16-bit integer numbers) GPR 'b' value is interpreted +/// modulo the number of data format df elements in the destination vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splat.h))] +unsafe fn __msa_splat_h(a: i16x8, b: i32) -> i16x8 { + msa_splat_h(a, b) +} + +/// GPR Element Splat +/// +/// Replicate vector 'a'(four signed 32-bit integer numbers) +/// element with index given by GPR 'b' to all elements in vector +/// (four signed 32-bit integer numbers) GPR 'b' value is interpreted +/// modulo the number of data format df elements in the destination vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splat.w))] +unsafe fn __msa_splat_w(a: i32x4, b: i32) -> i32x4 { + msa_splat_w(a, b) +} + +/// GPR Element Splat +/// +/// Replicate vector 'a'(two signed 64-bit integer numbers) +/// element with index given by GPR 'b' to all elements in vector +/// (two signed 64-bit integer numbers) GPR 'b' value is interpreted +/// modulo the number of data format df elements in the destination vector. +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splat.d))] +unsafe fn __msa_splat_d(a: i64x2, b: i32) -> i64x2 { + msa_splat_d(a, b) +} + +/// Immediate Element Splat +/// +/// Replicate element imm4 in vector 'a'(sixteen signed 8-bit integer numbers) +/// to all elements in vector (sixteen signed 8-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splati.b, imm4 = 0b1111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_splati_b(a: i8x16, imm4: i32) -> i8x16 { + macro_rules! call { + ($imm4:expr) => { + msa_splati_b(a, $imm4) + }; + } + constify_imm4!(imm4, call) +} + +/// Immediate Element Splat +/// +/// Replicate element imm3 in vector 'a'(eight signed 16-bit integer numbers) +/// to all elements in vector (eight signed 16-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splati.h, imm3 = 0b111))] +#[rustc_args_required_const(1)] +unsafe fn __msa_splati_h(a: i16x8, imm3: i32) -> i16x8 { + macro_rules! call { + ($imm3:expr) => { + msa_splati_h(a, $imm3) + }; + } + constify_imm3!(imm3, call) +} + +/// Immediate Element Splat +/// +/// Replicate element imm2 in vector 'a'(four signed 32-bit integer numbers) +/// to all elements in vector (four signed 32-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splati.w, imm2 = 0b11))] +#[rustc_args_required_const(1)] +unsafe fn __msa_splati_w(a: i32x4, imm2: i32) -> i32x4 { + macro_rules! call { + ($imm2:expr) => { + msa_splati_w(a, $imm2) + }; + } + constify_imm2!(imm2, call) +} + +/// Immediate Element Splat +/// +/// Replicate element imm1 in vector 'a'(two signed 64-bit integer numbers) +/// to all elements in vector (two signed 64-bit integer numbers). +/// +#[inline] +#[target_feature(enable = "msa")] +#[cfg_attr(test, assert_instr(splati.d, imm1 = 0b1))] +#[rustc_args_required_const(1)] +unsafe fn __msa_splati_d(a: i64x2, imm1: i32) -> i64x2 { + macro_rules! 
+        ($imm1:expr) => {
+            msa_splati_d(a, $imm1)
+        };
+    }
+    constify_imm1!(imm1, call)
+}
+
+/// Vector Shift Right Arithmetic
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (sixteen signed 8-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sra.b))]
+unsafe fn __msa_sra_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_sra_b(a, b)
+}
+
+/// Vector Shift Right Arithmetic
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (eight signed 16-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sra.h))]
+unsafe fn __msa_sra_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_sra_h(a, b)
+}
+
+/// Vector Shift Right Arithmetic
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (four signed 32-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sra.w))]
+unsafe fn __msa_sra_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_sra_w(a, b)
+}
+
+/// Vector Shift Right Arithmetic
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (two signed 64-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(sra.d))]
+unsafe fn __msa_sra_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_sra_d(a, b)
+}
+
+/// Immediate Shift Right Arithmetic
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right arithmetic by imm3 bits.
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srai.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srai_b(a: i8x16, imm3: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_srai_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Shift Right Arithmetic
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right arithmetic by imm4 bits.
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srai.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srai_h(a: i16x8, imm4: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_srai_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Shift Right Arithmetic
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right arithmetic by imm5 bits.
+/// The result is written to vector (four signed 32-bit integer numbers).
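+///
+/// A scalar model of the per-lane operation, with assumed values (not
+/// compiled as a doc test):
+///
+/// ```ignore
+/// let x: i32 = -20;
+/// assert_eq!(x >> 2, -5); // arithmetic shift replicates the sign bit
+/// ```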
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srai.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srai_w(a: i32x4, imm5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_srai_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Shift Right Arithmetic
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right arithmetic by imm6 bits.
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srai.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srai_d(a: i64x2, imm6: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_srai_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Vector Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (sixteen signed 8-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srar.b))]
+unsafe fn __msa_srar_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_srar_b(a, b)
+}
+
+/// Vector Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (eight signed 16-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srar.h))]
+unsafe fn __msa_srar_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_srar_h(a, b)
+}
+
+/// Vector Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (four signed 32-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srar.w))]
+unsafe fn __msa_srar_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_srar_w(a, b)
+}
+
+/// Vector Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right arithmetic by the number of bits the elements in vector 'b'
+/// (two signed 64-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (two signed 64-bit integer numbers).
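+///
+/// A scalar sketch of the rounding step, with assumed values (not compiled
+/// as a doc test):
+///
+/// ```ignore
+/// let x: i64 = -9;
+/// let shift = 2;
+/// let msb_discarded = (x >> (shift - 1)) & 1;   // == 1 here
+/// assert_eq!((x >> shift) + msb_discarded, -2); // plain x >> 2 would give -3
+/// ```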
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srar.d))]
+unsafe fn __msa_srar_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_srar_d(a, b)
+}
+
+/// Immediate Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right arithmetic by imm3 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srari.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srari_b(a: i8x16, imm3: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_srari_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right arithmetic by imm4 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srari.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srari_h(a: i16x8, imm4: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_srari_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right arithmetic by imm5 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srari.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srari_w(a: i32x4, imm5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_srari_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Shift Right Arithmetic Rounded
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right arithmetic by imm6 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srari.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srari_d(a: i64x2, imm6: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_srari_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Vector Shift Right Logical
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (sixteen signed 8-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (sixteen signed 8-bit integer numbers).
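+///
+/// A scalar model of one 8-bit lane, with assumed values (the cast to u8
+/// stands in for the logical, zero-filling shift; not compiled as a doc test):
+///
+/// ```ignore
+/// let x: i8 = -1;                  // bit pattern 0b1111_1111
+/// let r = ((x as u8) >> 2) as i8;  // zeros are shifted in from the left
+/// assert_eq!(r, 63);
+/// ```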
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srl.b))]
+unsafe fn __msa_srl_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_srl_b(a, b)
+}
+
+/// Vector Shift Right Logical
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (eight signed 16-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srl.h))]
+unsafe fn __msa_srl_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_srl_h(a, b)
+}
+
+/// Vector Shift Right Logical
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (four signed 32-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srl.w))]
+unsafe fn __msa_srl_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_srl_w(a, b)
+}
+
+/// Vector Shift Right Logical
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (two signed 64-bit integer numbers) specify, modulo the size of the
+/// element in bits. The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srl.d))]
+unsafe fn __msa_srl_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_srl_d(a, b)
+}
+
+/// Immediate Shift Right Logical
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right logical by imm4 bits.
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srli.b, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srli_b(a: i8x16, imm4: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_srli_b(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Shift Right Logical
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right logical by imm3 bits.
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srli.h, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srli_h(a: i16x8, imm3: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_srli_h(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Shift Right Logical
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right logical by imm2 bits.
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srli.w, imm2 = 0b11))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srli_w(a: i32x4, imm2: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm2:expr) => {
+            msa_srli_w(a, $imm2)
+        };
+    }
+    constify_imm2!(imm2, call)
+}
+
+/// Immediate Shift Right Logical
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right logical by imm1 bits.
+/// The result is written to vector (two signed 64-bit integer numbers).
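+///
+/// A scalar model of one 64-bit lane, with assumed values (not compiled as
+/// a doc test):
+///
+/// ```ignore
+/// let x: i64 = -1;
+/// let r = ((x as u64) >> 1) as i64; // logical shift of the whole lane
+/// assert_eq!(r, i64::max_value());
+/// ```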
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srli.d, imm1 = 0b1))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srli_d(a: i64x2, imm1: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm1:expr) => {
+            msa_srli_d(a, $imm1)
+        };
+    }
+    constify_imm1!(imm1, call)
+}
+
+/// Vector Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (sixteen signed 8-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlr.b))]
+unsafe fn __msa_srlr_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_srlr_b(a, b)
+}
+
+/// Vector Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (eight signed 16-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlr.h))]
+unsafe fn __msa_srlr_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_srlr_h(a, b)
+}
+
+/// Vector Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (four signed 32-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlr.w))]
+unsafe fn __msa_srlr_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_srlr_w(a, b)
+}
+
+/// Vector Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right logical by the number of bits the elements in vector 'b'
+/// (two signed 64-bit integer numbers) specify, modulo the size of the
+/// element in bits. The most significant discarded bit is added to the shifted
+/// value (for rounding) and the result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlr.d))]
+unsafe fn __msa_srlr_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_srlr_d(a, b)
+}
+
+/// Immediate Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are shifted right logical by imm3 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlri.b, imm3 = 0b111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srlri_b(a: i8x16, imm3: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm3:expr) => {
+            msa_srlri_b(a, $imm3)
+        };
+    }
+    constify_imm3!(imm3, call)
+}
+
+/// Immediate Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (eight signed 16-bit integer numbers)
+/// are shifted right logical by imm4 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlri.h, imm4 = 0b1111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srlri_h(a: i16x8, imm4: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm4:expr) => {
+            msa_srlri_h(a, $imm4)
+        };
+    }
+    constify_imm4!(imm4, call)
+}
+
+/// Immediate Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (four signed 32-bit integer numbers)
+/// are shifted right logical by imm5 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlri.w, imm5 = 0b11111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srlri_w(a: i32x4, imm5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_srlri_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Shift Right Logical Rounded
+///
+/// The elements in vector 'a' (two signed 64-bit integer numbers)
+/// are shifted right logical by imm6 bits. The most significant
+/// discarded bit is added to the shifted value (for rounding) and
+/// the result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(srlri.d, imm6 = 0b111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_srlri_d(a: i64x2, imm6: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm6:expr) => {
+            msa_srlri_d(a, $imm6)
+        };
+    }
+    constify_imm6!(imm6, call)
+}
+
+/// Vector Store
+///
+/// The WRLEN / 8 bytes in vector 'a' (sixteen signed 8-bit integer numbers)
+/// are stored as elements of data format df at the effective memory location
+/// addressed by the base mem_addr and the 10-bit signed immediate offset imm_s10.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_st_b(a: i8x16, mem_addr: *mut i8, imm_s10: i32) -> () {
+    macro_rules! call {
+        ($imm_s10:expr) => {
+            msa_st_b(a, mem_addr, $imm_s10)
+        };
+    }
+    constify_imm_s10!(imm_s10, call)
+}
+
+/// Vector Store
+///
+/// The WRLEN / 8 bytes in vector 'a' (eight signed 16-bit integer numbers)
+/// are stored as elements of data format df at the effective memory location
+/// addressed by the base mem_addr and the 11-bit signed immediate offset imm_s11.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_st_h(a: i16x8, mem_addr: *mut i8, imm_s11: i32) -> () {
+    macro_rules! call {
+        ($imm_s11:expr) => {
+            msa_st_h(a, mem_addr, $imm_s11)
+        };
+    }
+    constify_imm_s11!(imm_s11, call)
+}
+
+/// Vector Store
+///
+/// The WRLEN / 8 bytes in vector 'a' (four signed 32-bit integer numbers)
+/// are stored as elements of data format df at the effective memory location
+/// addressed by the base mem_addr and the 12-bit signed immediate offset imm_s12.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(st.w, imm_s12 = 0b111111111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_st_w(a: i32x4, mem_addr: *mut i8, imm_s12: i32) -> () {
+    macro_rules! call {
+        ($imm_s12:expr) => {
+            msa_st_w(a, mem_addr, $imm_s12)
+        };
+    }
+    constify_imm_s12!(imm_s12, call)
+}
+
+/// Vector Store
+///
+/// The WRLEN / 8 bytes in vector 'a' (two signed 64-bit integer numbers)
+/// are stored as elements of data format df at the effective memory location
+/// addressed by the base mem_addr and the 13-bit signed immediate offset imm_s13.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))]
+#[rustc_args_required_const(2)]
+unsafe fn __msa_st_d(a: i64x2, mem_addr: *mut i8, imm_s13: i32) -> () {
+    macro_rules! call {
+        ($imm_s13:expr) => {
+            msa_st_d(a, mem_addr, $imm_s13)
+        };
+    }
+    constify_imm_s13!(imm_s13, call)
+}
+
+/// Vector Signed Saturated Subtract of Signed Values
+///
+/// The elements in vector `b` (sixteen signed 8-bit integer numbers)
+/// are subtracted from the elements in vector `a` (sixteen signed 8-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_s.b))]
+unsafe fn __msa_subs_s_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_subs_s_b(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Signed Values
+///
+/// The elements in vector `b` (eight signed 16-bit integer numbers)
+/// are subtracted from the elements in vector `a` (eight signed 16-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_s.h))]
+unsafe fn __msa_subs_s_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_subs_s_h(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Signed Values
+///
+/// The elements in vector `b` (four signed 32-bit integer numbers)
+/// are subtracted from the elements in vector `a` (four signed 32-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_s.w))]
+unsafe fn __msa_subs_s_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_subs_s_w(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Signed Values
+///
+/// The elements in vector `b` (two signed 64-bit integer numbers)
+/// are subtracted from the elements in vector `a` (two signed 64-bit integer numbers).
+/// Signed arithmetic is performed and overflows clamp to the largest and/or smallest
+/// representable signed values before writing the result to vector (two signed 64-bit integer numbers).
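+///
+/// A scalar model of one lane using Rust's saturating arithmetic, with
+/// assumed values (not compiled as a doc test):
+///
+/// ```ignore
+/// assert_eq!(100i64.saturating_sub(4), 96);
+/// assert_eq!(i64::min_value().saturating_sub(3), i64::min_value());
+/// ```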
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_s.d))]
+unsafe fn __msa_subs_s_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_subs_s_d(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Unsigned Values
+///
+/// The elements in vector `b` (sixteen unsigned 8-bit integer numbers)
+/// are subtracted from the elements in vector `a` (sixteen unsigned 8-bit integer numbers).
+/// Unsigned arithmetic is performed and underflows clamp to 0 before writing
+/// the result to vector (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_u.b))]
+unsafe fn __msa_subs_u_b(a: u8x16, b: u8x16) -> u8x16 {
+    msa_subs_u_b(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Unsigned Values
+///
+/// The elements in vector `b` (eight unsigned 16-bit integer numbers)
+/// are subtracted from the elements in vector `a` (eight unsigned 16-bit integer numbers).
+/// Unsigned arithmetic is performed and underflows clamp to 0 before writing
+/// the result to vector (eight unsigned 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_u.h))]
+unsafe fn __msa_subs_u_h(a: u16x8, b: u16x8) -> u16x8 {
+    msa_subs_u_h(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Unsigned Values
+///
+/// The elements in vector `b` (four unsigned 32-bit integer numbers)
+/// are subtracted from the elements in vector `a` (four unsigned 32-bit integer numbers).
+/// Unsigned arithmetic is performed and underflows clamp to 0 before writing
+/// the result to vector (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_u.w))]
+unsafe fn __msa_subs_u_w(a: u32x4, b: u32x4) -> u32x4 {
+    msa_subs_u_w(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Unsigned Values
+///
+/// The elements in vector `b` (two unsigned 64-bit integer numbers)
+/// are subtracted from the elements in vector `a` (two unsigned 64-bit integer numbers).
+/// Unsigned arithmetic is performed and underflows clamp to 0 before writing
+/// the result to vector (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subs_u.d))]
+unsafe fn __msa_subs_u_d(a: u64x2, b: u64x2) -> u64x2 {
+    msa_subs_u_d(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Signed from Unsigned
+///
+/// The signed elements in vector `b` (sixteen signed 8-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The signed result is unsigned saturated and written to vector
+/// (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsus_u.b))]
+unsafe fn __msa_subsus_u_b(a: u8x16, b: i8x16) -> u8x16 {
+    msa_subsus_u_b(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Signed from Unsigned
+///
+/// The signed elements in vector `b` (eight signed 16-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (eight unsigned 16-bit integer numbers).
+/// The signed result is unsigned saturated and written to vector
+/// (eight unsigned 16-bit integer numbers).
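+///
+/// A scalar sketch of one 16-bit lane, with assumed values (i32 models the
+/// wider intermediate; not compiled as a doc test):
+///
+/// ```ignore
+/// let a: u16 = 10;
+/// let b: i16 = 20;
+/// let d = a as i32 - b as i32;
+/// let r = d.max(0).min(u16::max_value() as i32) as u16;
+/// assert_eq!(r, 0); // 10 - 20 underflows and clamps to 0
+/// ```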
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsus_u.h))]
+unsafe fn __msa_subsus_u_h(a: u16x8, b: i16x8) -> u16x8 {
+    msa_subsus_u_h(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Signed from Unsigned
+///
+/// The signed elements in vector `b` (four signed 32-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (four unsigned 32-bit integer numbers).
+/// The signed result is unsigned saturated and written to vector
+/// (four unsigned 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsus_u.w))]
+unsafe fn __msa_subsus_u_w(a: u32x4, b: i32x4) -> u32x4 {
+    msa_subsus_u_w(a, b)
+}
+
+/// Vector Unsigned Saturated Subtract of Signed from Unsigned
+///
+/// The signed elements in vector `b` (two signed 64-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (two unsigned 64-bit integer numbers).
+/// The signed result is unsigned saturated and written to vector
+/// (two unsigned 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsus_u.d))]
+unsafe fn __msa_subsus_u_d(a: u64x2, b: i64x2) -> u64x2 {
+    msa_subsus_u_d(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Unsigned Values
+///
+/// The unsigned elements in vector `b` (sixteen unsigned 8-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (sixteen unsigned 8-bit integer numbers).
+/// The signed result is signed saturated and written to vector
+/// (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsuu_s.b))]
+unsafe fn __msa_subsuu_s_b(a: u8x16, b: u8x16) -> i8x16 {
+    msa_subsuu_s_b(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Unsigned Values
+///
+/// The unsigned elements in vector `b` (eight unsigned 16-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (eight unsigned 16-bit integer numbers).
+/// The signed result is signed saturated and written to vector
+/// (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsuu_s.h))]
+unsafe fn __msa_subsuu_s_h(a: u16x8, b: u16x8) -> i16x8 {
+    msa_subsuu_s_h(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Unsigned Values
+///
+/// The unsigned elements in vector `b` (four unsigned 32-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (four unsigned 32-bit integer numbers).
+/// The signed result is signed saturated and written to vector
+/// (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsuu_s.w))]
+unsafe fn __msa_subsuu_s_w(a: u32x4, b: u32x4) -> i32x4 {
+    msa_subsuu_s_w(a, b)
+}
+
+/// Vector Signed Saturated Subtract of Unsigned Values
+///
+/// The unsigned elements in vector `b` (two unsigned 64-bit integer numbers)
+/// are subtracted from the unsigned elements in vector `a` (two unsigned 64-bit integer numbers).
+/// The signed result is signed saturated and written to vector
+/// (two signed 64-bit integer numbers).
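+///
+/// A scalar sketch of one 64-bit lane, with assumed values (i128 models the
+/// wider intermediate; not compiled as a doc test):
+///
+/// ```ignore
+/// let a: u64 = 5;
+/// let b: u64 = u64::max_value();
+/// let d = a as i128 - b as i128;
+/// let r = d.max(i64::min_value() as i128)
+///          .min(i64::max_value() as i128) as i64;
+/// assert_eq!(r, i64::min_value()); // the large negative difference saturates
+/// ```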
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subsuu_s.d))]
+unsafe fn __msa_subsuu_s_d(a: u64x2, b: u64x2) -> i64x2 {
+    msa_subsuu_s_d(a, b)
+}
+
+/// Vector Subtract
+///
+/// The elements in vector `b` (sixteen signed 8-bit integer numbers)
+/// are subtracted from the elements in vector `a` (sixteen signed 8-bit integer numbers).
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subv.b))]
+unsafe fn __msa_subv_b(a: i8x16, b: i8x16) -> i8x16 {
+    msa_subv_b(a, b)
+}
+
+/// Vector Subtract
+///
+/// The elements in vector `b` (eight signed 16-bit integer numbers)
+/// are subtracted from the elements in vector `a` (eight signed 16-bit integer numbers).
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subv.h))]
+unsafe fn __msa_subv_h(a: i16x8, b: i16x8) -> i16x8 {
+    msa_subv_h(a, b)
+}
+
+/// Vector Subtract
+///
+/// The elements in vector `b` (four signed 32-bit integer numbers)
+/// are subtracted from the elements in vector `a` (four signed 32-bit integer numbers).
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subv.w))]
+unsafe fn __msa_subv_w(a: i32x4, b: i32x4) -> i32x4 {
+    msa_subv_w(a, b)
+}
+
+/// Vector Subtract
+///
+/// The elements in vector `b` (two signed 64-bit integer numbers)
+/// are subtracted from the elements in vector `a` (two signed 64-bit integer numbers).
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subv.d))]
+unsafe fn __msa_subv_d(a: i64x2, b: i64x2) -> i64x2 {
+    msa_subv_d(a, b)
+}
+
+/// Immediate Subtract
+///
+/// The 5-bit unsigned immediate value imm5
+/// is subtracted from the elements in vector `a` (sixteen signed 8-bit integer numbers).
+/// The result is written to vector (sixteen signed 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subvi.b, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_subvi_b(a: i8x16, imm5: i32) -> i8x16 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_subvi_b(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Subtract
+///
+/// The 5-bit unsigned immediate value imm5
+/// is subtracted from the elements in vector `a` (eight signed 16-bit integer numbers).
+/// The result is written to vector (eight signed 16-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subvi.h, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_subvi_h(a: i16x8, imm5: i32) -> i16x8 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_subvi_h(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Subtract
+///
+/// The 5-bit unsigned immediate value imm5
+/// is subtracted from the elements in vector `a` (four signed 32-bit integer numbers).
+/// The result is written to vector (four signed 32-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subvi.w, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_subvi_w(a: i32x4, imm5: i32) -> i32x4 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_subvi_w(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Immediate Subtract
+///
+/// The 5-bit unsigned immediate value imm5
+/// is subtracted from the elements in vector `a` (two signed 64-bit integer numbers).
+/// The result is written to vector (two signed 64-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(subvi.d, imm5 = 0b10111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_subvi_d(a: i64x2, imm5: i32) -> i64x2 {
+    macro_rules! call {
+        ($imm5:expr) => {
+            msa_subvi_d(a, $imm5)
+        };
+    }
+    constify_imm5!(imm5, call)
+}
+
+/// Vector Data Preserving Shuffle
+///
+/// The vector shuffle instructions selectively copy data elements from the
+/// concatenation of vectors 'b' (sixteen signed 8-bit integer numbers)
+/// and `c` (sixteen signed 8-bit integer numbers) into vector 'a'
+/// (sixteen signed 8-bit integer numbers) based on the corresponding control element in 'a'.
+/// The least significant 6 bits of each control element in 'a', modulo the number of
+/// elements in the concatenated vectors 'b' and 'c', specify the index of the source element.
+/// If bit 6 or bit 7 is 1, no copy is performed; instead the destination element is set to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(vshf.b))]
+unsafe fn __msa_vshf_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 {
+    msa_vshf_b(a, b, c)
+}
+
+/// Vector Data Preserving Shuffle
+///
+/// The vector shuffle instructions selectively copy data elements from the
+/// concatenation of vectors 'b' (eight signed 16-bit integer numbers)
+/// and `c` (eight signed 16-bit integer numbers) into vector 'a'
+/// (eight signed 16-bit integer numbers) based on the corresponding control element in 'a'.
+/// The least significant 6 bits of each control element in 'a', modulo the number of
+/// elements in the concatenated vectors 'b' and 'c', specify the index of the source element.
+/// If bit 6 or bit 7 is 1, no copy is performed; instead the destination element is set to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(vshf.h))]
+unsafe fn __msa_vshf_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 {
+    msa_vshf_h(a, b, c)
+}
+
+/// Vector Data Preserving Shuffle
+///
+/// The vector shuffle instructions selectively copy data elements from the
+/// concatenation of vectors 'b' (four signed 32-bit integer numbers)
+/// and `c` (four signed 32-bit integer numbers) into vector 'a'
+/// (four signed 32-bit integer numbers) based on the corresponding control element in 'a'.
+/// The least significant 6 bits of each control element in 'a', modulo the number of
+/// elements in the concatenated vectors 'b' and 'c', specify the index of the source element.
+/// If bit 6 or bit 7 is 1, no copy is performed; instead the destination element is set to 0.
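+///
+/// A scalar sketch of the control-element decoding, with assumed values
+/// (not compiled as a doc test):
+///
+/// ```ignore
+/// let ctrl: u8 = 18;                    // bits 6 and 7 are clear
+/// let zeroed = ctrl & 0b1100_0000 != 0; // a set bit 6 or 7 zeroes the lane
+/// let idx = (ctrl as usize) % 8;        // index into the 8-element concatenation of b and c
+/// ```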
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(vshf.w))]
+unsafe fn __msa_vshf_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 {
+    msa_vshf_w(a, b, c)
+}
+
+/// Vector Data Preserving Shuffle
+///
+/// The vector shuffle instructions selectively copy data elements from the
+/// concatenation of vectors 'b' (two signed 64-bit integer numbers)
+/// and `c` (two signed 64-bit integer numbers) into vector 'a'
+/// (two signed 64-bit integer numbers) based on the corresponding control element in 'a'.
+/// The least significant 6 bits of each control element in 'a', modulo the number of
+/// elements in the concatenated vectors 'b' and 'c', specify the index of the source element.
+/// If bit 6 or bit 7 is 1, no copy is performed; instead the destination element is set to 0.
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(vshf.d))]
+unsafe fn __msa_vshf_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 {
+    msa_vshf_d(a, b, c)
+}
+
+/// Vector Logical Exclusive Or
+///
+/// Each bit of vector 'a' (sixteen unsigned 8-bit integer numbers)
+/// is combined with the corresponding bit of vector 'b' (sixteen unsigned 8-bit integer numbers)
+/// in a bitwise logical XOR operation. The result is written to vector
+/// (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(xor.v))]
+unsafe fn __msa_xor_v(a: u8x16, b: u8x16) -> u8x16 {
+    msa_xor_v(a, b)
+}
+
+/// Immediate Logical Exclusive Or
+///
+/// Each byte of vector 'a' (sixteen unsigned 8-bit integer numbers)
+/// is combined with the 8-bit immediate imm8
+/// in a bitwise logical XOR operation. The result is written to vector
+/// (sixteen unsigned 8-bit integer numbers).
+///
+#[inline]
+#[target_feature(enable = "msa")]
+#[cfg_attr(test, assert_instr(xori.b, imm8 = 0b11111111))]
+#[rustc_args_required_const(1)]
+unsafe fn __msa_xori_b(a: u8x16, imm8: i32) -> u8x16 {
+    macro_rules!
call { + ($imm8:expr) => { + msa_xori_b(a, $imm8) + }; + } + constify_imm8!(imm8, call) +} + +#[cfg(test)] +mod tests { + use std::f32; + use std::f64; + use core_arch::mips::msa::*; + use stdsimd_test::simd_test; + + #[simd_test(enable = "msa")] + unsafe fn test_msa_add_a_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + -4, -3, -2, -1, + -4, -3, -2, -1, + -4, -3, -2, -1, + -4, -3, -2, -1 + ); + let r = i8x16( + 5, 5, 5, 5, + 5, 5, 5, 5, + 5, 5, 5, 5, + 5, 5, 5, 5 + ); + + assert_eq!(r, __msa_add_a_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_add_a_h() { + #[rustfmt::skip] + let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = i16x8(-4, -3, -2, -1, -4, -3, -2, -1); + let r = i16x8(5, 5, 5, 5, 5, 5, 5, 5); + + assert_eq!(r, __msa_add_a_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_add_a_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(-4, -3, -2, -1); + let r = i32x4(5, 5, 5, 5); + + assert_eq!(r, __msa_add_a_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_add_a_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(-4, -3); + let r = i64x2(5, 5); + + assert_eq!(r, __msa_add_a_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_a_b() { + #[rustfmt::skip] + let a = i8x16( + 100, i8::max_value(), 100, i8::max_value(), + 100, i8::max_value(), 100, i8::max_value(), + 100, i8::max_value(), 100, i8::max_value(), + 100, i8::max_value(), 100, i8::max_value() + ); + #[rustfmt::skip] + let b = i8x16( + -4, -3, -2, -100, + -4, -3, -2, -100, + -4, -3, -2, -100, + -4, -3, -2, -100 + ); + let r = i8x16( + 104, 127, 102, 127, + 104, 127, 102, 127, + 104, 127, 102, 127, + 104, 127, 102, 127 + ); + + assert_eq!(r, __msa_adds_a_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_a_h() { + #[rustfmt::skip] + let a = i16x8( + 100, i16::max_value(), 100, i16::max_value(), + 100, i16::max_value(), 100, i16::max_value() + ); + #[rustfmt::skip] + let b = i16x8(-4, -3, -2, -1, -4, -3, -2, -1); + let r = i16x8( + 104, i16::max_value(), 102, i16::max_value(), + 104, i16::max_value(), 102, i16::max_value() + ); + + assert_eq!(r, __msa_adds_a_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_a_w() { + #[rustfmt::skip] + let a = i32x4(100, i32::max_value(), 100, i32::max_value()); + #[rustfmt::skip] + let b = i32x4(-4, -3, -2, -1); + let r = i32x4(104, i32::max_value(), 102, i32::max_value()); + + assert_eq!(r, __msa_adds_a_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_a_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::max_value()); + #[rustfmt::skip] + let b = i64x2(-4, -3); + let r = i64x2(104, i64::max_value()); + + assert_eq!(r, __msa_adds_a_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_s_b() { + #[rustfmt::skip] + let a = i8x16( + 100, i8::min_value(), 100, i8::max_value(), + 100, i8::min_value(), 100, i8::max_value(), + 100, i8::min_value(), 100, i8::max_value(), + 100, i8::min_value(), 100, i8::max_value() + ); + #[rustfmt::skip] + let b = i8x16( + -4, -3, -2, 100, + -4, -3, -2, 100, + -4, -3, -2, 100, + -4, -3, -2, 100 + ); + let r = i8x16( + 96, i8::min_value(), 98, i8::max_value(), + 96, i8::min_value(), 98, i8::max_value(), + 96, i8::min_value(), 98, i8::max_value(), + 96, i8::min_value(), 98, i8::max_value() + ); + + assert_eq!(r, __msa_adds_s_b(a, b)); + 
} + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_s_h() { + #[rustfmt::skip] + let a = i16x8( + 100, i16::min_value(), 100, i16::max_value(), + 100, i16::min_value(), 100, i16::max_value() + ); + #[rustfmt::skip] + let b = i16x8(-4, -3, -2, 1, -4, -3, -2, 1); + let r = i16x8( + 96, i16::min_value(), 98, i16::max_value(), + 96, i16::min_value(), 98, i16::max_value() + ); + + assert_eq!(r, __msa_adds_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_s_w() { + #[rustfmt::skip] + let a = i32x4(100, i32::max_value(), 100, i32::min_value()); + #[rustfmt::skip] + let b = i32x4(-4, 3, -2, -1); + let r = i32x4(96, i32::max_value(), 98, i32::min_value()); + + assert_eq!(r, __msa_adds_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_s_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::min_value()); + #[rustfmt::skip] + let b = i64x2(-4, -3); + let r = i64x2(96, i64::min_value()); + + assert_eq!(r, __msa_adds_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_u_b() { + #[rustfmt::skip] + let a = u8x16( + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value() + ); + #[rustfmt::skip] + let b = u8x16( + 4, 3, 2, 100, + 4, 3, 2, 100, + 4, 3, 2, 100, + 4, 3, 2, 100 + ); + let r = u8x16( + 104, u8::max_value(), 102, u8::max_value(), + 104, u8::max_value(), 102, u8::max_value(), + 104, u8::max_value(), 102, u8::max_value(), + 104, u8::max_value(), 102, u8::max_value() + ); + + assert_eq!(r, __msa_adds_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_u_h() { + #[rustfmt::skip] + let a = u16x8( + 100, u16::max_value(), 100, u16::max_value(), + 100, u16::max_value(), 100, u16::max_value() + ); + #[rustfmt::skip] + let b = u16x8(4, 3, 2, 1, 4, 3, 2, 1); + let r = u16x8( + 104, u16::max_value(), 102, u16::max_value(), + 104, u16::max_value(), 102, u16::max_value() + ); + + assert_eq!(r, __msa_adds_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_u_w() { + #[rustfmt::skip] + let a = u32x4(100, u32::max_value(), 100, u32::max_value()); + #[rustfmt::skip] + let b = u32x4(4, 3, 2, 1); + let r = u32x4(104, u32::max_value(), 102, u32::max_value()); + + assert_eq!(r, __msa_adds_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_adds_u_d() { + #[rustfmt::skip] + let a = u64x2(100, u64::max_value()); + #[rustfmt::skip] + let b = u64x2(4, 3); + let r = u64x2(104, u64::max_value()); + + assert_eq!(r, __msa_adds_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addv_b() { + #[rustfmt::skip] + let a = i8x16( + 100, i8::min_value(), 100, i8::max_value(), + 100, i8::min_value(), 100, i8::max_value(), + 100, i8::min_value(), 100, i8::max_value(), + 100, i8::min_value(), 100, i8::max_value() + ); + #[rustfmt::skip] + let b = i8x16( + -4, -3, -2, 100, + -4, -3, -2, 100, + -4, -3, -2, 100, + -4, -3, -2, 100 + ); + let r = i8x16( + 96, 125, 98, -29, + 96, 125, 98, -29, + 96, 125, 98, -29, + 96, 125, 98, -29 + ); + + assert_eq!(r, __msa_addv_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addv_h() { + #[rustfmt::skip] + let a = i16x8( + 100, i16::min_value(), 100, i16::max_value(), + 100, i16::min_value(), 100, i16::max_value() + ); + #[rustfmt::skip] + let b = i16x8(-4, -3, -2, 1, -4, -3, -2, 1); + let r = i16x8(96, 32765, 98, -32768, 96, 32765, 98, -32768); + + assert_eq!(r, __msa_addv_h(a, b)); + } + + 
#[simd_test(enable = "msa")] + unsafe fn test_msa_addv_w() { + #[rustfmt::skip] + let a = i32x4(100, i32::max_value(), 100, i32::min_value()); + #[rustfmt::skip] + let b = i32x4(-4, 3, -2, -1); + let r = i32x4(96, -2147483646, 98, 2147483647); + + assert_eq!(r, __msa_addv_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addv_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::min_value()); + #[rustfmt::skip] + let b = i64x2(-4, -3); + let r = i64x2(96, 9223372036854775805); + + assert_eq!(r, __msa_addv_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addvi_b() { + #[rustfmt::skip] + let a = i8x16( + 100, i8::max_value(), 100, i8::max_value(), + 100, i8::max_value(), 100, i8::max_value(), + 100, i8::max_value(), 100, i8::max_value(), + 100, i8::max_value(), 100, i8::max_value() + ); + let r = i8x16( + 103, -126, 103, -126, + 103, -126, 103, -126, + 103, -126, 103, -126, + 103, -126, 103, -126 + ); + + assert_eq!(r, __msa_addvi_b(a, 67)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addvi_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 3276, -100, -127, + i16::max_value(), 3276, -100, -127 + ); + let r = i16x8( + -32766, 3279, -97, -124, + -32766, 3279, -97, -124 + ); + + assert_eq!(r, __msa_addvi_h(a, 67)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addvi_w() { + #[rustfmt::skip] + let a = i32x4(100, i32::max_value(), 100, i32::min_value()); + let r = i32x4(103, -2147483646, 103, -2147483645); + + assert_eq!(r, __msa_addvi_w(a, 67)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_addvi_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::min_value()); + #[rustfmt::skip] + let r = i64x2(117, -9223372036854775791); + + assert_eq!(r, __msa_addvi_d(a, 17)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_and_v() { + #[rustfmt::skip] + let a = u8x16( + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value() + ); + #[rustfmt::skip] + let b = u8x16( + 4, 3, 2, 100, + 4, 3, 2, 100, + 4, 3, 2, 100, + 4, 3, 2, 100 + ); + let r = u8x16( + 4, 3, 0, 100, + 4, 3, 0, 100, + 4, 3, 0, 100, + 4, 3, 0, 100 + ); + + assert_eq!(r, __msa_and_v(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_andi_b() { + #[rustfmt::skip] + let a = u8x16( + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value(), + 100, u8::max_value(), 100, u8::max_value() + ); + let r = u8x16( + 4, 5, 4, 5, + 4, 5, 4, 5, + 4, 5, 4, 5, + 4, 5, 4, 5 + ); + + assert_eq!(r, __msa_andi_b(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_s_b() { + #[rustfmt::skip] + let a = i8x16( + -1, -2, -3, -4, + -1, -2, -3, -4, + -1, -2, -3, -4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = i8x16( + 5, 5, 5, 5, + 5, 5, 5, 5, + 5, 5, 5, 5, + 5, 5, 5, 5 + ); + + assert_eq!(r, __msa_asub_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_s_h() { + #[rustfmt::skip] + let a = i16x8(-1, -2, -3, -4, -1, -2, -3, -4); + #[rustfmt::skip] + let b = i16x8(-6, -7, -8, -9, -6, -7, -8, -9); + let r = i16x8(5, 5, 5, 5, 5, 5, 5, 5); + + assert_eq!(r, __msa_asub_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_s_w() { + #[rustfmt::skip] + let a = i32x4(-1, -2, -3, -4); + #[rustfmt::skip] + let b = 
i32x4(-6, -7, -8, -9); + let r = i32x4(5, 5, 5, 5); + + assert_eq!(r, __msa_asub_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, -2); + #[rustfmt::skip] + let b = i64x2(-6, -7); + let r = i64x2(5, 5); + + assert_eq!(r, __msa_asub_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 5, 5, 5, 5, + 5, 5, 5, 5, + 5, 5, 5, 5, + 5, 5, 5, 5 + ); + + assert_eq!(r, __msa_asub_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(5, 5, 5, 5, 5, 5, 5, 5); + + assert_eq!(r, __msa_asub_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(5, 5, 5, 5); + + assert_eq!(r, __msa_asub_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_asub_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(5, 5); + + assert_eq!(r, __msa_asub_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_s_b() { + #[rustfmt::skip] + let a = i8x16( + -1, -2, -3, -4, + -1, -2, -3, -4, + -1, -2, -3, -4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + 6, -7, 8, -9, + 6, -7, 8, -9, + 6, -7, 8, -9, + 6, -7, 8, -9 + ); + let r = i8x16( + 2, -5, 2, -7, + 2, -5, 2, -7, + 2, -5, 2, -7, + 2, -5, 2, -7 + ); + + assert_eq!(r, __msa_ave_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_s_h() { + #[rustfmt::skip] + let a = i16x8(-1, -2, -3, -4, -1, -2, -3, -4); + #[rustfmt::skip] + let b = i16x8(6, -7, 8, -9, 6, -7, 8, -9); + let r = i16x8(2, -5, 2, -7, 2, -5, 2, -7); + + assert_eq!(r, __msa_ave_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_s_w() { + #[rustfmt::skip] + let a = i32x4(-1, -2, -3, -4); + #[rustfmt::skip] + let b = i32x4(6, -7, 8, -9); + let r = i32x4(2, -5, 2, -7); + + assert_eq!(r, __msa_ave_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, -2); + #[rustfmt::skip] + let b = i64x2(-6, -7); + let r = i64x2(-4, -5); + + assert_eq!(r, __msa_ave_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 3, 4, 5, 6, + 3, 4, 5, 6, + 3, 4, 5, 6, + 3, 4, 5, 6 + ); + + assert_eq!(r, __msa_ave_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(3, 4, 5, 6, 3, 4, 5, 6); + + assert_eq!(r, __msa_ave_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(3, 4, 5, 6); + + assert_eq!(r, __msa_ave_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ave_u_d() { + 
#[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(3, 4); + + assert_eq!(r, __msa_ave_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_s_b() { + #[rustfmt::skip] + let a = i8x16( + -1, -2, 3, -4, + -1, -2, 3, -4, + -1, -2, 3, -4, + -1, -2, 3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, 7, -8, -9, + -6, 7, -8, -9, + -6, 7, -8, -9, + -6, 7, -8, -9 + ); + let r = i8x16( + -3, 3, -2, -6, + -3, 3, -2, -6, + -3, 3, -2, -6, + -3, 3, -2, -6 + ); + + assert_eq!(r, __msa_aver_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_s_h() { + #[rustfmt::skip] + let a = i16x8(-1, -2, 3, -4, -1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8(-6, 7, -8, -9, -6, 7, -8, -9); + let r = i16x8(-3, 3, -2, -6, -3, 3, -2, -6); + + assert_eq!(r, __msa_aver_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_s_w() { + #[rustfmt::skip] + let a = i32x4(-1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(-6, 7, -8, -9); + let r = i32x4(-3, 3, -2, -6); + + assert_eq!(r, __msa_aver_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, -2); + #[rustfmt::skip] + let b = i64x2(-6, -7); + let r = i64x2(-3, -4); + + assert_eq!(r, __msa_aver_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 4, 5, 6, 7, + 4, 5, 6, 7, + 4, 5, 6, 7, + 4, 5, 6, 7 + ); + + assert_eq!(r, __msa_aver_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(4, 5, 6, 7, 4, 5, 6, 7); + + assert_eq!(r, __msa_aver_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(4, 5, 6, 7); + + assert_eq!(r, __msa_aver_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_aver_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(4, 5); + + assert_eq!(r, __msa_aver_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclr_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 191, 27, 54, 1, + 191, 27, 54, 1, + 191, 27, 54, 1, + 191, 27, 54, 1 + ); + + assert_eq!(r, __msa_bclr_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclr_h() { + #[rustfmt::skip] + let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(191, 27, 55, 1, 191, 27, 55, 1); + + assert_eq!(r, __msa_bclr_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclr_w() { + #[rustfmt::skip] + let a = u32x4(255, 155, 55, 1); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(191, 27, 55, 1); + + assert_eq!(r, __msa_bclr_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclr_d() { + #[rustfmt::skip] + let a = u64x2(255, 155); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(191, 
27); + + assert_eq!(r, __msa_bclr_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclri_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + let r = u8x16( + 247, 147, 55, 1, + 247, 147, 55, 1, + 247, 147, 55, 1, + 247, 147, 55, 1 + ); + + assert_eq!(r, __msa_bclri_b(a, 3)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclri_h() { + #[rustfmt::skip] + let a = u16x8(2155, 1155, 155, 1, 2155, 1155, 155, 1); + let r = u16x8(107, 1155, 155, 1, 107, 1155, 155, 1); + + assert_eq!(r, __msa_bclri_h(a, 11)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclri_w() { + #[rustfmt::skip] + let a = u32x4(211111155, 111111155, 11111155, 1); + let r = u32x4(202722547, 102722547, 2722547, 1); + + assert_eq!(r, __msa_bclri_w(a, 23)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bclri_d() { + #[rustfmt::skip] + let a = u64x2(211111111155, 11111111111111155); + let r = u64x2(73672157683, 11110973672157683); + + assert_eq!(r, __msa_bclri_d(a, 37)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsl_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let c = u8x16( + 1, 3, 5, 9, + 1, 3, 5, 9, + 1, 3, 5, 9, + 1, 3, 5, 9 + ); + let r = u8x16( + 63, 11, 11, 1, + 63, 11, 11, 1, + 63, 11, 11, 1, + 63, 11, 11, 1 + ); + + assert_eq!(r, __msa_binsl_b(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsl_h() { + #[rustfmt::skip] + let a = u16x8( + 32767, 16384, 8192, 4096, + 32767, 16384, 8192, 4096 + ); + #[rustfmt::skip] + let b = u16x8( + 21656, 5273, 7081, 2985, + 21656, 5273, 7081, 2985 + ); + #[rustfmt::skip] + let c = u16x8( + 3, 7, 9, 13, + 15, 17, 21, 23 + ); + let r = u16x8( + 24575, 5120, 7040, 2984, + 21656, 0, 6144, 2816 + ); + + assert_eq!(r, __msa_binsl_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsl_w() { + #[rustfmt::skip] + let a = u32x4(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4(1036372536, 259093134, 78219975, 1119499719); + #[rustfmt::skip] + let c = u32x4(11, 15, 31, 37); + let r = u32x4(1037041663, 259063808, 78219975, 1082130432); + + assert_eq!(r, __msa_binsl_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsl_d() { + #[rustfmt::skip] + let a = u64x2(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2(9223372036854775805, 536870912); + #[rustfmt::skip] + let c = u64x2(12, 48); + let r = u64x2(9221120245047489898, 536901394); + + assert_eq!(r, __msa_binsl_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsli_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 7, 7, 11, 9, + 7, 7, 11, 9, + 7, 7, 11, 9, + 7, 7, 11, 9 + ); + + assert_eq!(r, __msa_binsli_b(a, b, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsli_h() { + #[rustfmt::skip] + let a = u16x8( + 32767, 16384, 8192, 4096, + 32767, 16384, 8192, 4096 + ); + #[rustfmt::skip] + let b = u16x8( + 21656, 5273, 7081, 2985, + 21656, 5273, 7081, 2985 + ); + let r = u16x8( + 21659, 5272, 7080, 2984, + 21659, 5272, 7080, 2984 + ); + + 
assert_eq!(r, __msa_binsli_h(a, b, 13)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsli_w() { + #[rustfmt::skip] + let a = u32x4(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4(1036372536, 259093134, 78219975, 1119499719); + let r = u32x4(1036386303, 259080192, 78217216, 1119485952); + + assert_eq!(r, __msa_binsli_w(a, b, 17)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsli_d() { + #[rustfmt::skip] + let a = u64x2(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2(9223372036854775805, 536870912); + let r = u64x2(9223372036854773098, 536901394); + + assert_eq!(r, __msa_binsli_d(a, b, 48)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsr_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let c = u8x16( + 1, 3, 5, 9, + 1, 3, 5, 9, + 1, 3, 5, 9, + 1, 3, 5, 9 + ); + let r = u8x16( + 254, 151, 8, 1, + 254, 151, 8, 1, + 254, 151, 8, 1, + 254, 151, 8, 1 + ); + + assert_eq!(r, __msa_binsr_b(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsr_h() { + #[rustfmt::skip] + let a = u16x8( + 32767, 16384, 8192, 4096, + 32767, 16384, 8192, 4096 + ); + #[rustfmt::skip] + let b = u16x8( + 21656, 5273, 7081, 2985, + 21656, 5273, 7081, 2985 + ); + #[rustfmt::skip] + let c = u16x8( + 3, 7, 9, 13, + 15, 17, 21, 23 + ); + let r = u16x8( + 32760, 16537, 9129, 2985, + 21656, 16385, 8233, 4265 + ); + + assert_eq!(r, __msa_binsr_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsr_w() { + #[rustfmt::skip] + let a = u32x4(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4(1036372536, 259093134, 78219975, 1119499719); + #[rustfmt::skip] + let c = u32x4(11, 15, 31, 37); + let r = u32x4(2147482168, 536900238, 78219975, 8388615); + + assert_eq!(r, __msa_binsr_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsr_d() { + #[rustfmt::skip] + let a = u64x2(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2(9223372036854775805, 536870912); + #[rustfmt::skip] + let c = u64x2(12, 48); + let r = u64x2(8006402045, 536870912); + + assert_eq!(r, __msa_binsr_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsri_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 198, 135, 8, 9, + 198, 135, 8, 9, + 198, 135, 8, 9, + 198, 135, 8, 9 + ); + + assert_eq!(r, __msa_binsri_b(a, b, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsri_h() { + #[rustfmt::skip] + let a = u16x8( + 32767, 16384, 8192, 4096, + 32767, 16384, 8192, 4096 + ); + #[rustfmt::skip] + let b = u16x8( + 21656, 5273, 7081, 2985, + 21656, 5273, 7081, 2985 + ); + let r = u16x8( + 21656, 21657, 7081, 2985, + 21656, 21657, 7081, 2985 + ); + + assert_eq!(r, __msa_binsri_h(a, b, 13)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsri_w() { + #[rustfmt::skip] + let a = u32x4(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4(1036372536, 259093134, 78219975, 1119499719); + let r = u32x4(2147338808, 536965774, 67209927, 8533447); + + assert_eq!(r, __msa_binsri_w(a, b, 17)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_binsri_d() { 
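+        // How the expected values arise (an informal sketch, inferred from
+        // the test data rather than taken from the patch itself): BINSRI.df
+        // keeps the bits of `a` above position imm and takes the low imm + 1
+        // bits of `b`. With imm = 48, the low 49 bits of 9223372036854775805
+        // (0x7FFF_FFFF_FFFF_FFFD) are 562949953421309, and `a` contributes
+        // no set bits above bit 48, which yields the expected first lane.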
+ #[rustfmt::skip] + let a = u64x2(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2(9223372036854775805, 536870912); + let r = u64x2(562949953421309, 536870912); + + assert_eq!(r, __msa_binsri_d(a, b, 48)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bmnz_v() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + ); + #[rustfmt::skip] + let c = u8x16( + 3, 5, 7, 1, + 3, 5, 7, 1, + 3, 5, 7, 1, + 3, 5, 7, 1 + ); + let r = u8x16( + 254, 159, 48, 1, + 254, 159, 48, 1, + 254, 159, 48, 1, + 254, 159, 48, 1 + ); + + assert_eq!(r, __msa_bmnz_v(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bmnzi_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 1, u8::max_value(), 155, 55, + 1, u8::max_value(), 155, 55, + 1, u8::max_value(), 155, 55, + 1, u8::max_value(), 155, 55 + ); + let r = u8x16( + 249, 159, 51, 7, + 249, 159, 51, 7, + 249, 159, 51, 7, + 249, 159, 51, 7 + ); + + assert_eq!(r, __msa_bmnzi_b(a, b, 7)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bmz_v() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let c = u8x16( + 3, 5, 7, 1, + 3, 5, 7, 1, + 3, 5, 7, 1, + 3, 5, 7, 1 + ); + let r = u8x16( + 7, 3, 15, 9, + 7, 3, 15, 9, + 7, 3, 15, 9, + 7, 3, 15, 9 + ); + + assert_eq!(r, __msa_bmz_v(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bmzi_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1, + u8::max_value(), 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 1, 255, 155, 55, + 1, 255, 155, 55, + 1, 255, 155, 55, + 1, 255, 155, 55 + ); + let r = u8x16( + 7, 251, 159, 49, + 7, 251, 159, 49, + 7, 251, 159, 49, + 7, 251, 159, 49 + ); + + assert_eq!(r, __msa_bmzi_b(a, b, 7)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bneg_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 191, 27, 54, 3, + 191, 27, 54, 3, + 191, 27, 54, 3, + 191, 27, 54, 3 + ); + + assert_eq!(r, __msa_bneg_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bneg_h() { + #[rustfmt::skip] + let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(191, 27, 311, 513, 191, 27, 311, 513); + + assert_eq!(r, __msa_bneg_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bneg_w() { + #[rustfmt::skip] + let a = u32x4(255, 155, 55, 1); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(191, 27, 311, 513); + + assert_eq!(r, __msa_bneg_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bneg_d() { + #[rustfmt::skip] + let a = u64x2(255, 155); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(191, 27); + + assert_eq!(r, __msa_bneg_d(a, b)); + } + + #[simd_test(enable 
= "msa")] + unsafe fn test_msa_bnegi_b() { + #[rustfmt::skip] + let a = u8x16( + 50, 100, 127, u8::max_value(), + 50, 100, 127, u8::max_value(), + 50, 100, 127, u8::max_value(), + 50, 100, 127, u8::max_value() + ); + let r = u8x16( + 34, 116, 111, 239, + 34, 116, 111, 239, + 34, 116, 111, 239, + 34, 116, 111, 239 + ); + + assert_eq!(r, __msa_bnegi_b(a, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnegi_h() { + #[rustfmt::skip] + let a = u16x8( + 32767, 3276, 100, 127, + 32767, 3276, 100, 127 + ); + let r = u16x8(30719, 1228, 2148, 2175, 30719, 1228, 2148, 2175); + + assert_eq!(r, __msa_bnegi_h(a, 11)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnegi_w() { + #[rustfmt::skip] + let a = u32x4(100, 2147483647, 100, 2147483648); + let r = u32x4(16777316, 2130706431, 16777316, 2164260864); + + assert_eq!(r, __msa_bnegi_w(a, 24)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnegi_d() { + #[rustfmt::skip] + let a = u64x2(100, 9223372036854775808); + #[rustfmt::skip] + let r = u64x2(4398046511204, 9223376434901286912); + + assert_eq!(r, __msa_bnegi_d(a, 42)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnz_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 1, 1, 1, + 1, 1, 1, 1, + 2, 2, 2, 2, + 4, 4, 0, 4, + ); + let r = 0 as i32; + + assert_eq!(r, __msa_bnz_b(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnz_h() { + #[rustfmt::skip] + let a = u16x8( + 32767, 3276, 100, 127, + 32767, 0, 100, 127 + ); + let r = 0 as i32; + + assert_eq!(r, __msa_bnz_h(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnz_w() { + #[rustfmt::skip] + let a = u32x4(100, 2147483647, 0, 2147483648); + let r = 0 as i32; + + assert_eq!(r, __msa_bnz_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnz_d() { + #[rustfmt::skip] + let a = u64x2(100, 9223372036854775808); + #[rustfmt::skip] + let r = 1 as i32; + + assert_eq!(r, __msa_bnz_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bnz_v() { + #[rustfmt::skip] + let a = u8x16( + 0, 0, 0, 1, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + ); + let r = 1 as i32; + + assert_eq!(r, __msa_bnz_v(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bsel_v() { + #[rustfmt::skip] + let a = u8x16( + 3, 5, 7, 1, + 3, 5, 7, 1, + 3, 5, 7, 1, + 3, 5, 7, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let c = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + let r = u8x16(7, 3, 15, 9, 7, 3, 15, 9, 7, 3, 15, 9, 7, 3, 15, 9); + + assert_eq!(r, __msa_bsel_v(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bseli_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16(121, 29, 57, 9, 121, 29, 57, 9, 121, 29, 57, 9, 121, 29, 57, 9); + + assert_eq!(r, __msa_bseli_b(a, b, 121)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bset_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16(255, 155, 55, 3, 255, 155, 55, 3, 255, 155, 55, 3, 255, 155, 55, 3); + + assert_eq!(r, __msa_bset_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bset_h() { + #[rustfmt::skip] + let 
a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(255, 155, 311, 513, 255, 155, 311, 513); + + assert_eq!(r, __msa_bset_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bset_w() { + #[rustfmt::skip] + let a = u32x4(255, 155, 55, 1); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(255, 155, 311, 513); + + assert_eq!(r, __msa_bset_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bset_d() { + #[rustfmt::skip] + let a = u64x2(255, 155); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(255, 155); + + assert_eq!(r, __msa_bset_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bseti_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + let r = u8x16( + 255, 159, 55, 5, + 255, 159, 55, 5, + 255, 159, 55, 5, + 255, 159, 55, 5 + ); + + assert_eq!(r, __msa_bseti_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bseti_h() { + #[rustfmt::skip] + let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + let r = u16x8(255, 159, 55, 5, 255, 159, 55, 5); + + assert_eq!(r, __msa_bseti_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bseti_w() { + #[rustfmt::skip] + let a = u32x4(255, 155, 55, 1); + let r = u32x4(255, 159, 55, 5); + + assert_eq!(r, __msa_bseti_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bseti_d() { + #[rustfmt::skip] + let a = u64x2(255, 155); + let r = u64x2(255, 159); + + assert_eq!(r, __msa_bseti_d(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bz_b() { + #[rustfmt::skip] + let a = u8x16( + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1, + 255, 155, 55, 1 + ); + let r = 0 as i32; + + assert_eq!(r, __msa_bz_b(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bz_h() { + #[rustfmt::skip] + let a = u16x8(0, 0, 0, 0, 0, 0, 0, 0); + let r = 1 as i32; + + assert_eq!(r, __msa_bz_h(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bz_w() { + #[rustfmt::skip] + let a = u32x4(255, 0, 55, 1); + let r = 1 as i32; + + assert_eq!(r, __msa_bz_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bz_d() { + #[rustfmt::skip] + let a = u64x2(255, 0); + let r = 1 as i32; + + assert_eq!(r, __msa_bz_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_bz_v() { + #[rustfmt::skip] + let a = u8x16( + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0 + ); + let r = 1 as i32; + + assert_eq!(r, __msa_bz_v(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ceq_b() { + #[rustfmt::skip] + let a = i8x16( + -128, 127, 55, 1, + -128, 127, 55, 1, + -128, 127, 55, 1, + -128, 127, 55, 1 + ); + #[rustfmt::skip] + let b = i8x16( + -128, 126, 55, 1, + -128, 126, 55, 1, + -128, 126, 55, 1, + -128, 126, 55, 1 + ); + let r = i8x16(-1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1); + + assert_eq!(r, __msa_ceq_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ceq_h() { + #[rustfmt::skip] + let a = i16x8(255, 155, 55, 1, 255, 155, 55, 1); + #[rustfmt::skip] + let b = i16x8(255, 155, 56, 1, 255, 155, 56, 1); + let r = i16x8(-1, -1, 0, -1, -1, -1, 0, -1); + + assert_eq!(r, __msa_ceq_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ceq_w() { + #[rustfmt::skip] + let a = i32x4(255, 155, 55, 1); + #[rustfmt::skip] + let b = i32x4(255, 156, 55, 1); + let r = i32x4(-1, 0, -1, -1); + + assert_eq!(r, __msa_ceq_w(a, b)); + } + + 
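+    // The compare intrinsics exercised in these tests return lane masks:
+    // every bit of a lane is set when the predicate holds for that lane and
+    // cleared otherwise, so a "true" lane reads as -1 when printed as a
+    // signed integer. A hypothetical lane check (illustration only, not
+    // part of this patch) could look like:
+    //
+    //     let mask = __msa_ceq_w(a, b);
+    //     let lane0_equal = __msa_copy_s_w(mask, 0) == -1;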
#[simd_test(enable = "msa")] + unsafe fn test_msa_ceq_d() { + #[rustfmt::skip] + let a = i64x2(255, 155); + #[rustfmt::skip] + let b = i64x2(255, 156); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_ceq_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ceqi_b() { + #[rustfmt::skip] + let a = i8x16( + 100, -1, -4, 15, + 100, -1, -4, 15, + 100, -1, -4, 15, + 100, -1, -4, 15 + ); + let r = i8x16( + 0, 0, -1, 0, + 0, 0, -1, 0, + 0, 0, -1, 0, + 0, 0, -1, 0 + ); + + assert_eq!(r, __msa_ceqi_b(a, -4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ceqi_h() { + #[rustfmt::skip] + let a = i16x8( + 32767, 3276, 100, -11, + 32767, 3276, 100, -11 + ); + let r = i16x8(0, 0, 0, -1, 0, 0, 0, -1); + + assert_eq!(r, __msa_ceqi_h(a, -11)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ceqi_w() { + #[rustfmt::skip] + let a = i32x4(1, 3, 5, -3); + let r = i32x4(0, 0, -1, 0); + + assert_eq!(r, __msa_ceqi_w(a, 5)); + } + + // FIXME: https://reviews.llvm.org/D59884 + // If target type is i64, negative immediate loses the sign + // Test passes if 4294967293 is used instead of -3 in vector 'a' + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_ceqi_d() { + // #[rustfmt::skip] + // let a = i64x2(-3, 2); + // #[rustfmt::skip] + // let r = i64x2(-1, 0); + + // assert_eq!(r, __msa_ceqi_d(a, -3)); + // } + + // Can not be tested in user mode + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_cfcmsa() { + // let r = 5; + + // assert_eq!(r, __msa_cfcmsa(5)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_s_b() { + #[rustfmt::skip] + let a = i8x16( + -128, 127, 55, 2, + -128, 127, 55, 2, + -128, 127, 55, 2, + -128, 127, 55, 2 + ); + #[rustfmt::skip] + let b = i8x16( + -128, 126, 55, 1, + -128, 126, 55, 1, + -128, 126, 55, 1, + -128, 126, 55, 1 + ); + let r = i8x16(-1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1); + + assert_eq!(r, __msa_cle_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_s_h() { + #[rustfmt::skip] + let a = i16x8(255, 155, 55, 2, 255, 155, 55, 2); + #[rustfmt::skip] + let b = i16x8(255, 155, 56, 1, 255, 155, 56, 1); + let r = i16x8(-1, -1, -1, 0, -1, -1, -1, 0); + + assert_eq!(r, __msa_cle_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_s_w() { + #[rustfmt::skip] + let a = i32x4(255, 155, 55, 2); + #[rustfmt::skip] + let b = i32x4(255, 156, 55, 1); + let r = i32x4(-1, -1, -1, 0); + + assert_eq!(r, __msa_cle_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_s_d() { + #[rustfmt::skip] + let a = i64x2(255, 155); + #[rustfmt::skip] + let b = i64x2(255, 156); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_cle_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_u_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 127, 55, 2, + u8::max_value(), 127, 55, 2, + u8::max_value(), 127, 55, 2, + u8::max_value(), 127, 55, 2 + ); + #[rustfmt::skip] + let b = u8x16( + u8::max_value(), 126, 55, 1, + u8::max_value(), 126, 55, 1, + u8::max_value(), 126, 55, 1, + u8::max_value(), 126, 55, 1 + ); + let r = i8x16(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + + assert_eq!(r, __msa_cle_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_u_h() { + #[rustfmt::skip] + let a = u16x8( + u16::max_value(), 155, 55, 2, + u16::max_value(), 155, 55, 2 + ); + #[rustfmt::skip] + let b = u16x8( + u16::max_value(), 155, 56, 1, + u16::max_value(), 155, 56, 1 + ); + let r = i16x8(-1, -1, -1, 0, -1, -1, -1, 0); + +
assert_eq!(r, __msa_cle_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_u_w() { + #[rustfmt::skip] + let a = u32x4(u32::max_value(), 155, 55, 2); + #[rustfmt::skip] + let b = u32x4(u32::max_value(), 156, 55, 1); + let r = i32x4(-1, -1, -1, 0); + + assert_eq!(r, __msa_cle_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_cle_u_d() { + #[rustfmt::skip] + let a = u64x2(u64::max_value(), 155); + #[rustfmt::skip] + let b = u64x2(u64::max_value(), 156); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_cle_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_s_b() { + #[rustfmt::skip] + let a = i8x16( + -2, -127, 100, -127, + -2, -127, 100, -127, + -2, -127, 100, -127, + -2, -127, 100, -127 + ); + let r = i8x16(-1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1); + + assert_eq!(r, __msa_clei_s_b(a, -2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_s_h() { + #[rustfmt::skip] + let a = i16x8( + 32767, 3276, 10, -1, + 32767, 3276, 10, -1, + ); + let r = i16x8(0, 0, 0, -1, 0, 0, 0, -1); + + assert_eq!(r, __msa_clei_s_h(a, -1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_s_w() { + #[rustfmt::skip] + let a = i32x4(100, 2147483647, 6, 2147483647); + let r = i32x4(0, 0, -1, 0); + + assert_eq!(r, __msa_clei_s_w(a, 6)); + } + + // FIXME: https://reviews.llvm.org/D59884 + // If target type is i64, negative immediate loses the sign + // -3 is represented as 4294967293 + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_clei_s_d() { + // #[rustfmt::skip] + // let a = i64x2(-3, 11); + // #[rustfmt::skip] + // let r = i64x2(-1, 0); + + // assert_eq!(r, __msa_clei_s_d(a, -3)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_u_b() { + #[rustfmt::skip] + let a = u8x16( + 2, 127, 100, 127, + 2, 127, 100, 127, + 2, 127, 100, 127, + 2, 127, 100, 127, + ); + let r = i8x16(-1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0); + + assert_eq!(r, __msa_clei_u_b(a, 25)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_u_h() { + #[rustfmt::skip] + let a = u16x8( + 1, 26, 15, 36, + 1, 26, 15, 36 + ); + let r = i16x8(-1, 0, -1, 0, -1, 0, -1, 0); + + assert_eq!(r, __msa_clei_u_h(a, 25)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_u_w() { + #[rustfmt::skip] + let a = u32x4(25, 32, 25, 32); + let r = i32x4(-1, 0, -1, 0); + + assert_eq!(r, __msa_clei_u_w(a, 31)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clei_u_d() { + #[rustfmt::skip] + let a = u64x2(10, 26); + #[rustfmt::skip] + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_clei_u_d(a, 25)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_s_b() { + #[rustfmt::skip] + let a = i8x16( + -128, 127, 55, 2, + -128, 127, 55, 2, + -128, 127, 55, 2, + -128, 127, 55, 2 + ); + #[rustfmt::skip] + let b = i8x16( + -127, 126, 56, 1, + -127, 126, 56, 1, + -127, 126, 56, 1, + -127, 126, 56, 1 + ); + let r = i8x16( + -1, 0, -1, 0, + -1, 0, -1, 0, + -1, 0, -1, 0, + -1, 0, -1, 0 + ); + + assert_eq!(r, __msa_clt_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_s_h() { + #[rustfmt::skip] + let a = i16x8(-255, 155, 55, 2, -255, 155, 55, 2); + #[rustfmt::skip] + let b = i16x8(255, 156, 56, 1, 255, 156, 56, 1); + let r = i16x8(-1, -1, -1, 0, -1, -1, -1, 0); + + assert_eq!(r, __msa_clt_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_s_w() { + #[rustfmt::skip] + let a = i32x4(-255, 155, 55, 2); + #[rustfmt::skip] + let b = i32x4(255, 156, 55, 1); + let r 
= i32x4(-1, -1, 0, 0); + + assert_eq!(r, __msa_clt_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_s_d() { + #[rustfmt::skip] + let a = i64x2(-255, 155); + #[rustfmt::skip] + let b = i64x2(255, 156); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_clt_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_u_b() { + #[rustfmt::skip] + let a = u8x16( + 128, 127, 55, 2, + 128, 127, 55, 2, + 128, 127, 55, 2, + 128, 127, 55, 2 + ); + #[rustfmt::skip] + let b = u8x16( + 127, 126, 56, 1, + 127, 126, 56, 1, + 127, 126, 56, 1, + 127, 126, 56, 1 + ); + let r = i8x16( + 0, 0, -1, 0, + 0, 0, -1, 0, + 0, 0, -1, 0, + 0, 0, -1, 0 + ); + + assert_eq!(r, __msa_clt_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_u_h() { + #[rustfmt::skip] + let a = u16x8(255, 155, 55, 2, 255, 155, 55, 2); + #[rustfmt::skip] + let b = u16x8(255, 156, 56, 1, 255, 156, 56, 1); + let r = i16x8(0, -1, -1, 0, 0, -1, -1, 0); + + assert_eq!(r, __msa_clt_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_u_w() { + #[rustfmt::skip] + let a = u32x4(255, 155, 55, 2); + #[rustfmt::skip] + let b = u32x4(255, 156, 55, 1); + let r = i32x4(0, -1, 0, 0); + + assert_eq!(r, __msa_clt_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clt_u_d() { + #[rustfmt::skip] + let a = u64x2(255, 155); + #[rustfmt::skip] + let b = u64x2(255, 156); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_clt_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_s_b() { + #[rustfmt::skip] + let a = i8x16( + 2, -127, -5, 127, + 2, -127, -5, 127, + 2, -127, -5, 127, + 2, -127, -5, 127 + ); + let r = i8x16( + 0, -1, 0, 0, + 0, -1, 0, 0, + 0, -1, 0, 0, + 0, -1, 0, 0 + ); + + assert_eq!(r, __msa_clti_s_b(a, -5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_s_h() { + #[rustfmt::skip] + let a = i16x8( + -1024, 3276, 15, 127, + -1024, 3276, 15, 127 + ); + let r = i16x8(-1, 0, 0, 0, -1, 0, 0, 0); + + assert_eq!(r, __msa_clti_s_h(a, 15)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_s_w() { + #[rustfmt::skip] + let a = i32x4(-15, 2147483647, -15, 2147483647); + let r = i32x4(-1, 0, -1, 0); + + assert_eq!(r, __msa_clti_s_w(a, -10)); + } + + // FIXME: https://reviews.llvm.org/D59884 + // If target type is i64, negative immediate loses the sign + // -3 is represented as 4294967293 + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_clti_s_d() { + // #[rustfmt::skip] + // let a = i64x2(-5, -2); + // #[rustfmt::skip] + // let r = i64x2(-1, 0); + + // assert_eq!(r, __msa_clti_s_d(a, -3)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_u_b() { + #[rustfmt::skip] + let a = u8x16( + 2, 127, 49, 127, + 2, 127, 49, 127, + 2, 127, 49, 127, + 2, 127, 49, 127, + ); + let r = i8x16( + -1, 0, 0, 0, + -1, 0, 0, 0, + -1, 0, 0, 0, + -1, 0, 0, 0 + ); + + assert_eq!(r, __msa_clti_u_b(a, 50)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_u_h() { + #[rustfmt::skip] + let a = u16x8( + 327, 3276, 100, 127, + 327, 3276, 100, 127 + ); + let r = i16x8(0, 0, 0, 0, 0, 0, 0, 0); + + assert_eq!(r, __msa_clti_u_h(a, 30)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_u_w() { + #[rustfmt::skip] + let a = u32x4(100, 2147483647, 100, 2147483647); + let r = i32x4(0, 0, 0, 0); + + assert_eq!(r, __msa_clti_u_w(a, 10)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_clti_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 9223372036854775807); + #[rustfmt::skip] + let r = i64x2(-1, 
0); + + assert_eq!(r, __msa_clti_u_d(a, 10)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_s_b() { + #[rustfmt::skip] + let a = i8x16( + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127 + ); + let r = -100 as i32; + + assert_eq!(r, __msa_copy_s_b(a, 12)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_s_h() { + #[rustfmt::skip] + let a = i16x8( + 32767, 3276, 100, 11, + 32767, 3276, 100, 11 + ); + let r = 32767 as i32; + + assert_eq!(r, __msa_copy_s_h(a, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_s_w() { + #[rustfmt::skip] + let a = i32x4(100, 2147483647, 5, -2147483647); + let r = 2147483647 as i32; + + assert_eq!(r, __msa_copy_s_w(a, 1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_s_d() { + #[rustfmt::skip] + let a = i64x2(3, 9223372036854775807); + #[rustfmt::skip] + let r = 9223372036854775807 as i64; + + assert_eq!(r, __msa_copy_s_d(a, 1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_u_b() { + #[rustfmt::skip] + let a = i8x16( + 100, 127, 4, 127, + 100, 127, 4, 127, + 100, 127, 4, 127, + 100, 127, 4, 127 + ); + let r = 100 as u32; + + assert_eq!(r, __msa_copy_u_b(a, 12)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_u_h() { + #[rustfmt::skip] + let a = i16x8( + 32767, 3276, 100, 11, + 32767, 3276, 100, 11 + ); + let r = 32767 as u32; + + assert_eq!(r, __msa_copy_u_h(a, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_u_w() { + #[rustfmt::skip] + let a = i32x4(100, 2147483647, 5, 2147483647); + let r = 2147483647 as u32; + + assert_eq!(r, __msa_copy_u_w(a, 1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_copy_u_d() { + #[rustfmt::skip] + let a = i64x2(3, i64::max_value()); + #[rustfmt::skip] + let r = 9223372036854775807 as u64; + + assert_eq!(r, __msa_copy_u_d(a, 1)); + } + + // Can not be tested in user mode + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_ctcmsa() { + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_s_b() { + #[rustfmt::skip] + let a = i8x16( + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + #[rustfmt::skip] + let b = i8x16( + -1, -2, -3, -4, + -1, -2, -3, -4, + -1, -2, -3, -4, + -1, -2, -3, -4 + ); + let r = i8x16( + 6, 3, 2, 2, + 6, 3, 2, 2, + 6, 3, 2, 2, + 6, 3, 2, 2 + ); + + assert_eq!(r, __msa_div_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_s_h() { + #[rustfmt::skip] + let a = i16x8(-6, -7, -8, -9, 6, 7, 8, 9); + #[rustfmt::skip] + let b = i16x8(-1, -2, -3, -4, -1, -2, -3, -4); + let r = i16x8(6, 3, 2, 2, -6, -3, -2, -2); + + assert_eq!(r, __msa_div_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_s_w() { + #[rustfmt::skip] + let a = i32x4(-6, -7, 8, 9); + #[rustfmt::skip] + let b = i32x4(-1, -2, -3, -4); + let r = i32x4(6, 3, -2, -2); + + assert_eq!(r, __msa_div_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_s_d() { + #[rustfmt::skip] + let a = i64x2(-6, 7); + #[rustfmt::skip] + let b = i64x2(-1, -2); + let r = i64x2(6, -3); + + assert_eq!(r, __msa_div_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_u_b() { + #[rustfmt::skip] + let a = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let b = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = u8x16( + 6, 3, 2, 2, + 6, 3, 2, 2, + 6, 3, 2, 2, + 6, 3, 2, 2 + ); + + assert_eq!(r, __msa_div_u_b(a, b)); + } + + 
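+    // Note on the signed-division expectations above: __msa_div_s_* appears
+    // to truncate toward zero, matching Rust's integer `/`; for example, in
+    // test_msa_div_s_h the lane 7 / -2 yields -3 rather than the floored -4.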
#[simd_test(enable = "msa")] + unsafe fn test_msa_div_u_h() { + #[rustfmt::skip] + let a = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let b = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let r = u16x8(6, 3, 2, 2, 6, 3, 2, 2); + + assert_eq!(r, __msa_div_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_u_w() { + #[rustfmt::skip] + let a = u32x4(6, 7, 8, 9); + #[rustfmt::skip] + let b = u32x4(1, 2, 3, 4); + let r = u32x4(6, 3, 2, 2); + + assert_eq!(r, __msa_div_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_div_u_d() { + #[rustfmt::skip] + let a = u64x2(6, 7); + #[rustfmt::skip] + let b = u64x2(1, 2); + let r = u64x2(6, 3); + + assert_eq!(r, __msa_div_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dotp_s_h() { + #[rustfmt::skip] + let a = i8x16( + -1, -2, -3, 4, + -1, -2, -3, -4, + -1, -2, -3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = i16x8(20, -12, 20, 60, 20, -12, 20, 60); + + assert_eq!(r, __msa_dotp_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dotp_s_w() { + #[rustfmt::skip] + let a = i16x8(-1, -2, -3, -4, -1, -2, -3, 4); + #[rustfmt::skip] + let b = i16x8(-6, -7, -8, -9, -6, -7, -8, -9); + let r = i32x4(20, 60, 20, -12); + + assert_eq!(r, __msa_dotp_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dotp_s_d() { + #[rustfmt::skip] + let a = i32x4(-1, -2, -3, 4); + #[rustfmt::skip] + let b = i32x4(-6, -7, -8, -9); + let r = i64x2(20, -12); + + assert_eq!(r, __msa_dotp_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dotp_u_h() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u16x8(20, 60, 20, 60, 20, 60, 20, 60); + + assert_eq!(r, __msa_dotp_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dotp_u_w() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u32x4(20, 60, 20, 60); + + assert_eq!(r, __msa_dotp_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dotp_u_d() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u64x2(20, 60); + + assert_eq!(r, __msa_dotp_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpadd_s_h() { + #[rustfmt::skip] + let a = i16x8(-1, -2, -3, -4, -1, -2, -3, 4); + #[rustfmt::skip] + let b = i8x16( + -1, -2, -3, 4, + -1, -2, -3, -4, + -1, -2, -3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let c = i8x16( + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = i16x8(19, -14, 17, 56, 19, -14, 17, 64); + + assert_eq!(r, __msa_dpadd_s_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpadd_s_w() { + #[rustfmt::skip] + let a = i32x4(-1, -2, -3, -4); + #[rustfmt::skip] + let b = i16x8( + -1, -2, -3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let c = i16x8( + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = i32x4(19, -14, 17, 56); + + assert_eq!(r, __msa_dpadd_s_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpadd_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, -2); + #[rustfmt::skip] + let b = i32x4(-1, -2, -3, 4); + #[rustfmt::skip] + let c = i32x4(-6, -7, -8, -9); + let r = i64x2(19, -14); + + assert_eq!(r, 
__msa_dpadd_s_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpadd_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let c = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u16x8(21, 62, 23, 64, 21, 62, 23, 64); + + assert_eq!(r, __msa_dpadd_u_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpadd_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let c = u16x8( + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u32x4(21, 62, 23, 64); + + assert_eq!(r, __msa_dpadd_u_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpadd_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let b = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let c = u32x4(6, 7, 8, 9); + let r = u64x2(21, 62); + + assert_eq!(r, __msa_dpadd_u_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpsub_s_h() { + #[rustfmt::skip] + let a = i16x8(-1, -2, -3, -4, -1, -2, -3, 4); + #[rustfmt::skip] + let b = i8x16( + -1, -2, -3, 4, + -1, -2, -3, -4, + -1, -2, -3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let c = i8x16( + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = i16x8(-21, 10, -23, -64, -21, 10, -23, -56); + + assert_eq!(r, __msa_dpsub_s_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpsub_s_w() { + #[rustfmt::skip] + let a = i32x4(-1, -2, -3, -4); + #[rustfmt::skip] + let b = i16x8( + -1, -2, -3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let c = i16x8( + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = i32x4(-21, 10, -23, -64); + + assert_eq!(r, __msa_dpsub_s_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpsub_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, -2); + #[rustfmt::skip] + let b = i32x4(-1, -2, -3, 4); + #[rustfmt::skip] + let c = i32x4(-6, -7, -8, -9); + let r = i64x2(-21, 10); + + assert_eq!(r, __msa_dpsub_s_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpsub_u_h() { + #[rustfmt::skip] + let a = i16x8(1, -2, 3, -4, -1, 2, -3, 4); + #[rustfmt::skip] + let b = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let c = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = i16x8(-19, -62, -17, -64, -21, -58, -23, -56); + + assert_eq!(r, __msa_dpsub_u_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpsub_u_w() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = u16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let c = u16x8( + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = i32x4(-19, -62, -17, -64); + + assert_eq!(r, __msa_dpsub_u_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_dpsub_u_d() { + #[rustfmt::skip] + let a = i64x2(1, -2); + #[rustfmt::skip] + let b = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let c = u32x4(6, 7, 8, 9); + let r = i64x2(-19, -62); + + assert_eq!(r, __msa_dpsub_u_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fadd_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, -4.4); + #[rustfmt::skip] + let b = f32x4(4.4, -3.3, 2.2, -1.1); + let r = f32x4(5.5, -5.5, 5.5, -5.5); + + assert_eq!(r, __msa_fadd_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fadd_d() { +
#[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(4.4, -3.3); + let r = f64x2(5.5, -5.5); + + assert_eq!(r, __msa_fadd_d(a, b)); + } + + // The only observed behaviour should be a SIGFPE signal + // Can not be tested + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcaf_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, -4.4); + #[rustfmt::skip] + let b = f32x4(0.0, -1.2, 3.3, f32::NAN); + let r = i32x4(0, 0, 0, 0); + + assert_eq!(r, __msa_fcaf_w(a, b)); + } + + // The only observed behaviour should be a SIGFPE signal + // Can not be tested + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcaf_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(-2.2, 1.1); + let r = i64x2(0, 0); + + assert_eq!(r, __msa_fcaf_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fceq_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(-4.4, -2.2, 3.3, f32::NAN); + let r = i32x4(0, -1, -1, 0); + + assert_eq!(r, __msa_fceq_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fceq_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_fceq_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fclass_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let r = i32x4(128, 8, 128, 2); + + assert_eq!(r, __msa_fclass_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fclass_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + let r = i64x2(128, 8); + + assert_eq!(r, __msa_fclass_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcle_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(-4.4, -1.2, 3.3, f32::NAN); + let r = i32x4(0, -1, -1, 0); + + assert_eq!(r, __msa_fcle_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcle_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_fcle_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fclt_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(-4.4, -1.2, 3.3, f32::NAN); + let r = i32x4(0, -1, 0, 0); + + assert_eq!(r, __msa_fclt_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fclt_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fclt_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcne_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(-4.4, -1.2, 3.3, f32::NAN); + let r = i32x4(-1, -1, 0, 0); + + assert_eq!(r, __msa_fcne_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcne_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fcne_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcor_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); + let r = i32x4(0, -1, -1, 0); + + assert_eq!(r, __msa_fcor_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcor_d() { + #[rustfmt::skip] + let a = f64x2(1.1, f64::NAN); + #[rustfmt::skip] + let b =
f64x2(1.1, 1.1); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_fcor_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcueq_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); + let r = i32x4(-1, 0, -1, -1); + + assert_eq!(r, __msa_fcueq_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcueq_d() { + #[rustfmt::skip] + let a = f64x2(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_fcueq_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcule_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); + let r = i32x4(-1, -1, -1, -1); + + assert_eq!(r, __msa_fcule_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcule_d() { + #[rustfmt::skip] + let a = f64x2(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_fcule_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcult_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); + let r = i32x4(-1, -1, 0, -1); + + assert_eq!(r, __msa_fcult_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcult_d() { + #[rustfmt::skip] + let a = f64x2(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fcult_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcun_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); + let r = i32x4(-1, 0, 0, -1); + + assert_eq!(r, __msa_fcun_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcun_d() { + #[rustfmt::skip] + let a = f64x2(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fcun_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcune_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); + let r = i32x4(-1, -1, 0, -1); + + assert_eq!(r, __msa_fcune_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fcune_d() { + #[rustfmt::skip] + let a = f64x2(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2(1.1, 1.1); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fcune_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fdiv_w() { + #[rustfmt::skip] + let a = f32x4(5.25, -20.2, 333.333, -425.0); + #[rustfmt::skip] + let b = f32x4(4.0, -2.1, 11.11, 8.2); + let r = f32x4(1.3125, 9.619048, 30.002972, -51.82927); + + assert_eq!(r, __msa_fdiv_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fdiv_d() { + #[rustfmt::skip] + let a = f64x2(1111.11, -222222.2); + #[rustfmt::skip] + let b = f64x2(-4.85, 3.33); + let r = f64x2(-229.09484536082473, -66733.3933933934); + + assert_eq!(r, __msa_fdiv_d(a, b)); + } + + /*// FIXME: 16-bit floats + #[simd_test(enable = "msa")] + unsafe fn test_msa_fexdo_h() { + #[rustfmt::skip] + let a = f32x4(20.5, 2.3, 4.5, 5.4); + #[rustfmt::skip] + let b = f32x4(1.1, 1.0, 1.0, 1.0); + let r = i16x8(1, 9, 30, 51, 1, 9, 30, 51); + + assert_eq!(r, __msa_fexdo_h(a, b)); + }*/ + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fexdo_w() { + #[rustfmt::skip] + let a = 
f64x2(2000005.5, 2.3); + #[rustfmt::skip] + let b = f64x2(1235689784512.1, 2147483649998.5); + let r = f32x4( + 1235689800000.0, 2147483600000.0, + 2000005.5, 2.3 + ); + + assert_eq!(r, __msa_fexdo_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fexp2_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, -4.4); + #[rustfmt::skip] + let b = i32x4(4, -3, 2, 1); + let r = f32x4(17.6, -0.275, 13.2, -8.8); + + assert_eq!(r, __msa_fexp2_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fexp2_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = i64x2(-4, 3); + let r = f64x2(0.06875, -17.6); + + assert_eq!(r, __msa_fexp2_d(a, b)); + } + + // FIXME: 16-bit floats + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_fexupl_w() { + // #[rustfmt::skip] + // let a = f16x8(1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5); + // #[rustfmt::skip] + // let r = f32x4(5.5, 6.5, 7.5, 8.5); + + // assert_eq!(r, __msa_fexupl_w(a)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fexupl_d() { + #[rustfmt::skip] + let a = f32x4(5.5, 6.5, 7.5, 8.5); + #[rustfmt::skip] + let r = f64x2(7.5, 8.5); + + assert_eq!(r, __msa_fexupl_d(a)); + } + + // FIXME: 16-bit floats + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_fexupr_w() { + // #[rustfmt::skip] + // let a = f16x8(1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5); + // #[rustfmt::skip] + // let r = f32x4(1.5, 2.5, 3.5, 4.5); + + // assert_eq!(r, __msa_fexupr_w(a)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fexupr_d() { + #[rustfmt::skip] + let a = f32x4(5.5, 6.5, 7.5, 8.5); + #[rustfmt::skip] + let r = f64x2(5.5, 6.5); + + assert_eq!(r, __msa_fexupr_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffint_s_w() { + #[rustfmt::skip] + let a = i32x4(-1, 2, -3, 4); + #[rustfmt::skip] + let r = f32x4(-1.0, 2.0, -3.0, 4.0); + + assert_eq!(r, __msa_ffint_s_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffint_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, 2); + #[rustfmt::skip] + let r = f64x2(-1.0, 2.0); + + assert_eq!(r, __msa_ffint_s_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffint_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let r = f32x4(1.0, 2.0, 3.0, 4.0); + + assert_eq!(r, __msa_ffint_u_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffint_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let r = f64x2(1.0, 2.0); + + assert_eq!(r, __msa_ffint_u_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffql_w() { + #[rustfmt::skip] + let a = i16x8(11, 25, 33, 47, 11, 25, 33, 47); + #[rustfmt::skip] + let r = f32x4( + 0.00033569336, 0.00076293945, + 0.0010070801, 0.0014343262 + ); + + assert_eq!(r, __msa_ffql_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffql_d() { + #[rustfmt::skip] + let a = i32x4(1111, 2222, 3333, 4444); + #[rustfmt::skip] + let r = f64x2( + 0.000001552049070596695, + 0.0000020693987607955933 + ); + + assert_eq!(r, __msa_ffql_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffqr_w() { + #[rustfmt::skip] + let a = i16x8(12, 26, 34, 48, 11, 25, 33, 47); + #[rustfmt::skip] + let r = f32x4( + 0.00036621094, 0.00079345703, + 0.0010375977, 0.0014648438 + ); + + assert_eq!(r, __msa_ffqr_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ffqr_d() { + #[rustfmt::skip] + let a = i32x4(1111, 2555, 3333, 475); + #[rustfmt::skip] + let r = f64x2( + 0.0000005173496901988983, + 
0.0000011897645890712738 + ); + + assert_eq!(r, __msa_ffqr_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fill_b() { + #[rustfmt::skip] + let r = i8x16( + 2, 2, 2, 2, + 2, 2, 2, 2, + 2, 2, 2, 2, + 2, 2, 2, 2 + ); + + assert_eq!(r, __msa_fill_b(2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fill_h() { + #[rustfmt::skip] + let r = i16x8(2, 2, 2, 2, 2, 2, 2, 2); + + assert_eq!(r, __msa_fill_h(2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fill_w() { + #[rustfmt::skip] + let r = i32x4(2, 2, 2, 2); + + assert_eq!(r, __msa_fill_w(2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fill_d() { + #[rustfmt::skip] + let r = i64x2(2, 2); + + assert_eq!(r, __msa_fill_d(2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_flog2_w() { + #[rustfmt::skip] + let a = f32x4(8.0, 16.0, 32.0, 64.0); + #[rustfmt::skip] + let r = f32x4(3.0, 4.0, 5.0, 6.0); + + assert_eq!(r, __msa_flog2_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_flog2_d() { + #[rustfmt::skip] + let a = f64x2(8.0, 16.0); + #[rustfmt::skip] + let r = f64x2(3.0, 4.0); + + assert_eq!(r, __msa_flog2_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmadd_w() { + #[rustfmt::skip] + let a = f32x4(1.0, 2.0, 3.0, 4.0); + let b = f32x4(5.0, 6.0, 7.0, 8.0); + let c = f32x4(9.0, 10.0, 11.0, 12.0); + #[rustfmt::skip] + let r = f32x4(46.0, 62.0, 80.0, 100.0); + + assert_eq!(r, __msa_fmadd_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmadd_d() { + #[rustfmt::skip] + let a = f64x2(1.0, 2.0); + let b = f64x2(3.0, 4.0); + let c = f64x2(5.0, 6.0); + #[rustfmt::skip] + let r = f64x2(16.0, 26.0); + + assert_eq!(r, __msa_fmadd_d(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmax_w() { + #[rustfmt::skip] + let a = f32x4(1.0, -6.0, 7.0, 8.0); + let b = f32x4(5.0, -2.0, 3.0, 4.0); + #[rustfmt::skip] + let r = f32x4(5.0, -2.0, 7.0, 8.0); + + assert_eq!(r, __msa_fmax_w(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmax_d() { + #[rustfmt::skip] + let a = f64x2(1.0, 4.0); + let b = f64x2(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2(3.0, 4.0); + + assert_eq!(r, __msa_fmax_d(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmax_a_w() { + #[rustfmt::skip] + let a = f32x4(1.0, -6.0, -7.0, -8.0); + let b = f32x4(5.0, -2.0, 3.0, 4.0); + #[rustfmt::skip] + let r = f32x4(5.0, -6.0, -7.0, -8.0); + + assert_eq!(r, __msa_fmax_a_w(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmax_a_d() { + #[rustfmt::skip] + let a = f64x2(1.0, -4.0); + let b = f64x2(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2(3.0, -4.0); + + assert_eq!(r, __msa_fmax_a_d(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmin_w() { + #[rustfmt::skip] + let a = f32x4(1.0, -6.0, 7.0, 8.0); + let b = f32x4(5.0, -2.0, 3.0, 4.0); + #[rustfmt::skip] + let r = f32x4(1.0, -6.0, 3.0, 4.0); + + assert_eq!(r, __msa_fmin_w(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmin_d() { + #[rustfmt::skip] + let a = f64x2(1.0, 4.0); + let b = f64x2(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2(1.0, 2.0); + + assert_eq!(r, __msa_fmin_d(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmin_a_w() { + #[rustfmt::skip] + let a = f32x4(1.0, -6.0, -7.0, -8.0); + let b = f32x4(5.0, -2.0, 3.0, 4.0); + #[rustfmt::skip] + let r = f32x4(1.0, -2.0, 3.0, 4.0); + + assert_eq!(r, __msa_fmin_a_w(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmin_a_d() { + #[rustfmt::skip] + let a = 
f64x2(1.0, -4.0); + let b = f64x2(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2(1.0, 2.0); + + assert_eq!(r, __msa_fmin_a_d(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmsub_w() { + #[rustfmt::skip] + let a = f32x4(1.0, 2.0, 3.0, 4.0); + let b = f32x4(5.0, 6.0, 7.0, 8.0); + let c = f32x4(9.0, 10.0, 11.0, 12.0); + #[rustfmt::skip] + let r = f32x4(-44.0, -58.0, -74.0, -92.0); + + assert_eq!(r, __msa_fmsub_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmsub_d() { + #[rustfmt::skip] + let a = f64x2(1.0, 2.0); + let b = f64x2(3.0, 4.0); + let c = f64x2(5.0, 6.0); + #[rustfmt::skip] + let r = f64x2(-14.0, -22.0); + + assert_eq!(r, __msa_fmsub_d(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmul_w() { + #[rustfmt::skip] + let a = f32x4(1.1, -2.2, 3.3, 4.4); + #[rustfmt::skip] + let b = f32x4(4.4, 3.3, 2.2, -1.1); + let r = f32x4(4.84, -7.26, 7.26, -4.84); + + assert_eq!(r, __msa_fmul_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fmul_d() { + #[rustfmt::skip] + let a = f64x2(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2(4.0, -3.3); + let r = f64x2(4.4, 7.26); + + assert_eq!(r, __msa_fmul_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_frint_w() { + #[rustfmt::skip] + let a = f32x4(2.6, -2.7, 1.3, -1.7); + #[rustfmt::skip] + let r = f32x4(3.0, -3.0, 1.0, -2.0); + + assert_eq!(r, __msa_frint_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_frint_d() { + #[rustfmt::skip] + let a = f64x2(2.6, 1.3); + #[rustfmt::skip] + let r = f64x2(3.0, 1.0); + + assert_eq!(r, __msa_frint_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_frcp_w() { + #[rustfmt::skip] + let a = f32x4(2.6, -2.7, 1.3, -1.7); + #[rustfmt::skip] + let r = f32x4( + 0.3846154, -0.37037036, + 0.7692308, -0.58823526 + ); + + assert_eq!(r, __msa_frcp_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_frcp_d() { + #[rustfmt::skip] + let a = f64x2(2.6, 1.3); + #[rustfmt::skip] + let r = f64x2(0.3846153846153846, 0.7692307692307692); + + assert_eq!(r, __msa_frcp_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_frsqrt_w() { + #[rustfmt::skip] + let a = f32x4(2.6, 2.7, 1.3, 1.7); + #[rustfmt::skip] + let r = f32x4( + 0.6201737, 0.6085806, + 0.87705797, 0.766965 + ); + + assert_eq!(r, __msa_frsqrt_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_frsqrt_d() { + #[rustfmt::skip] + let a = f64x2(2.6, 1.3); + #[rustfmt::skip] + let r = f64x2(0.6201736729460422, 0.8770580193070292); + + assert_eq!(r, __msa_frsqrt_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsaf_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4(-5.5, 5.5, 5.5, 5.5); + let r = i32x4(0, 0, 0, 0); + + assert_eq!(r, __msa_fsaf_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsaf_d() { + #[rustfmt::skip] + let a = f64x2(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2(125.5, 3.3); + let r = i64x2(0, 0); + + assert_eq!(r, __msa_fsaf_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fseq_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, -3.3, f32::NAN, f32::NAN); + #[rustfmt::skip] + let b = f32x4(5.5, -3.3, f32::NAN, 1.1); + let r = i32x4(0, -1, 0, 0); + + assert_eq!(r, __msa_fseq_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fseq_d() { + #[rustfmt::skip] + let a = f64x2(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2(125.5, 5.5); + let r = i64x2(0, -1); + + assert_eq!(r,
__msa_fseq_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsle_w() { + #[rustfmt::skip] + let a = f32x4(5.5, 5.5, 5.5, f32::NAN); + #[rustfmt::skip] + let b = f32x4(-5.5, 3.3, 5.5, f32::NAN); + let r = i32x4(0, 0, -1, 0); + + assert_eq!(r, __msa_fsle_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsle_d() { + #[rustfmt::skip] + let a = f64x2(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2(125.5, 3.3); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_fsle_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fslt_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4(5.5, 3.3, 5.5, 1.1); + let r = i32x4(-1, 0, 0, 0); + + assert_eq!(r, __msa_fslt_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fslt_d() { + #[rustfmt::skip] + let a = f64x2(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2(125.5, 3.3); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_fslt_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsne_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4(5.5, 3.3, 5.5, 1.1); + let r = i32x4(-1, -1, 0, -1); + + assert_eq!(r, __msa_fsne_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsne_d() { + #[rustfmt::skip] + let a = f64x2(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2(125.5, 5.5); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_fsne_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsor_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, f32::NAN, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4(5.5, 3.3, 5.5, 1.1); + let r = i32x4(-1, 0, -1, -1); + + assert_eq!(r, __msa_fsor_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsor_d() { + #[rustfmt::skip] + let a = f64x2(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2(125.5, f64::NAN); + let r = i64x2(-1, 0); + + assert_eq!(r, __msa_fsor_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsqrt_w() { + #[rustfmt::skip] + let a = f32x4(9.0, 81.0, 1089.0, 10000.0); + let r = f32x4(3.0, 9.0, 33.0, 100.0); + + assert_eq!(r, __msa_fsqrt_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsqrt_d() { + #[rustfmt::skip] + let a = f64x2(81.0, 10000.0); + let r = f64x2(9.0, 100.0); + + assert_eq!(r, __msa_fsqrt_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsub_w() { + #[rustfmt::skip] + let a = f32x4(5.5, 6.5, 7.5, 8.5); + #[rustfmt::skip] + let b = f32x4(1.25, 1.75, 2.25, 2.75); + let r = f32x4(4.25, 4.75, 5.25, 5.75); + + assert_eq!(r, __msa_fsub_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsub_d() { + #[rustfmt::skip] + let a = f64x2(555.5, 55.5); + #[rustfmt::skip] + let b = f64x2(4.25, 3.25); + let r = f64x2(551.25, 52.25); + + assert_eq!(r, __msa_fsub_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsueq_w() { + #[rustfmt::skip] + let a = f32x4(5.5, f32::NAN, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4(5.5, 5.5, -5.5, 5.5); + let r = i32x4(-1, -1, 0, -1); + + assert_eq!(r, __msa_fsueq_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsueq_d() { + #[rustfmt::skip] + let a = f64x2(-5.5, 5.5); + #[rustfmt::skip] + let b = f64x2(5.5, f64::NAN); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fsueq_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsule_w() { + #[rustfmt::skip] + let a = f32x4(5.7, 5.8, 5.9, f32::NAN); + #[rustfmt::skip] + let b = f32x4(5.6, 5.9, 5.9, f32::NAN); + let 
r = i32x4(0, -1, -1, -1); + + assert_eq!(r, __msa_fsule_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsule_d() { + #[rustfmt::skip] + let a = f64x2(5.5, 5.5); + #[rustfmt::skip] + let b = f64x2(5.5, 5.5); + let r = i64x2(-1, -1); + + assert_eq!(r, __msa_fsule_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsult_w() { + #[rustfmt::skip] + let a = f32x4(5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4(5.6, f32::NAN, 2.2, 1.1); + let r = i32x4(-1, -1, 0, 0); + + assert_eq!(r, __msa_fsult_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsult_d() { + #[rustfmt::skip] + let a = f64x2(5.5, f64::NAN); + #[rustfmt::skip] + let b = f64x2(4.4, 3.3); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fsult_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsun_w() { + #[rustfmt::skip] + let a = f32x4(5.5, 5.5, f32::NAN, 5.5); + #[rustfmt::skip] + let b = f32x4(4.4, 3.3, 2.2, f32::NAN); + let r = i32x4(0, 0, -1, -1); + + assert_eq!(r, __msa_fsun_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsun_d() { + #[rustfmt::skip] + let a = f64x2(5.5, f64::NAN); + #[rustfmt::skip] + let b = f64x2(4.4, 3.3); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fsun_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsune_w() { + #[rustfmt::skip] + let a = f32x4(5.5, 5.5, f32::NAN, 5.5); + #[rustfmt::skip] + let b = f32x4(4.4, 3.3, 2.2, 5.5); + let r = i32x4(-1, -1, -1, 0); + + assert_eq!(r, __msa_fsune_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_fsune_d() { + #[rustfmt::skip] + let a = f64x2(5.5, f64::NAN); + #[rustfmt::skip] + let b = f64x2(5.5, 3.3); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_fsune_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftint_s_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); + let r = i32x4(-6, 76, -1001, 1219); + + assert_eq!(r, __msa_ftint_s_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftint_s_d() { + #[rustfmt::skip] + let a = f64x2(-5.5, 25656.4); + let r = i64x2(-6, 25656); + + assert_eq!(r, __msa_ftint_s_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftint_u_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); + let r = u32x4(0, 76, 0, 1219); + + assert_eq!(r, __msa_ftint_u_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftint_u_d() { + #[rustfmt::skip] + let a = f64x2(5.5, -25656.4); + let r = u64x2(6, 0); + + assert_eq!(r, __msa_ftint_u_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftq_h() { + #[rustfmt::skip] + let a = f32x4(0.00001, 0.0002, 0.00001, -0.0002); + #[rustfmt::skip] + let b = f32x4(0.0001, -0.002, 0.0001, 0.002); + let r = i16x8(3, -66, 3, 66, 0, 7, 0, -7); + + assert_eq!(r, __msa_ftq_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftq_w() { + #[rustfmt::skip] + let a = f64x2(0.00001, -0.0002); + #[rustfmt::skip] + let b = f64x2(0.00000045, 0.000015); + let r = i32x4(966, 32212, 21475, -429497); + + assert_eq!(r, __msa_ftq_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftrunc_s_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); + let r = i32x4(-5, 75, -1000, 1219); + + assert_eq!(r, __msa_ftrunc_s_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftrunc_s_d() { + #[rustfmt::skip] + let a = f64x2(-5.5, 25656.4); + let r = i64x2(-5, 25656); + + assert_eq!(r, __msa_ftrunc_s_d(a)); + } + + #[simd_test(enable = 
"msa")] + unsafe fn test_msa_ftrunc_u_w() { + #[rustfmt::skip] + let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); + let r = u32x4(0, 75, 0, 1219); + + assert_eq!(r, __msa_ftrunc_u_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ftrunc_u_d() { + #[rustfmt::skip] + let a = f64x2(5.5, -25656.4); + let r = u64x2(5, 0); + + assert_eq!(r, __msa_ftrunc_u_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hadd_s_h() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i16x8(6, 6, 2, -2, 6, 6, 2, -2); + + assert_eq!(r, __msa_hadd_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hadd_s_w() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i32x4(6, 6, 2, -2); + + assert_eq!(r, __msa_hadd_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hadd_s_d() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i64x2(2, -2); + + assert_eq!(r, __msa_hadd_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hadd_u_h() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = u16x8(6, 6, 6, 6, 6, 6, 6, 6); + + assert_eq!(r, __msa_hadd_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hadd_u_w() { + #[rustfmt::skip] + let a = u16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = u32x4(6, 6, 6, 6); + + assert_eq!(r, __msa_hadd_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hadd_u_d() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(4, 3, 2, 1); + let r = u64x2(6, 6); + + assert_eq!(r, __msa_hadd_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hsub_s_h() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i16x8(-2, 2, -6, -6, -2, 2, -6, -6); + + assert_eq!(r, __msa_hsub_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hsub_s_w() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i32x4(-2, 2, -6, -6); + + assert_eq!(r, __msa_hsub_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hsub_s_d() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i64x2(-6, -6); + + assert_eq!(r, __msa_hsub_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hsub_u_h() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i16x8(-2, 2, -2, 2, -2, 2, -2, 2); + + assert_eq!(r, __msa_hsub_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hsub_u_w() { + #[rustfmt::skip] + let a = u16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i32x4(-2, 2, -2, 
2); + + assert_eq!(r, __msa_hsub_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_hsub_u_d() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(4, 3, 2, 1); + let r = i64x2(-2, 2); + + assert_eq!(r, __msa_hsub_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvev_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i8x16( + 4, 1, 2, 3, + 4, 1, 2, 3, + 4, 1, 2, 3, + 4, 1, 2, 3 + ); + + assert_eq!(r, __msa_ilvev_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvev_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i16x8(4, 1, 2, 3, 4, 1, 2, 3); + + assert_eq!(r, __msa_ilvev_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvev_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(4, 1, 2, 3); + + assert_eq!(r, __msa_ilvev_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvev_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(4, 3); + let r = i64x2(4, 1); + + assert_eq!(r, __msa_ilvev_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvl_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = i8x16( + 16, 15, 14, 13, + 12, 11, 10, 9, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + 8, 9, 7, 10, + 6, 11, 5, 12, + 4, 13, 3, 14, + 2, 15, 1, 16 + ); + + assert_eq!(r, __msa_ilvl_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvl_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 5, 6, 7, 8 + ); + #[rustfmt::skip] + let b = i16x8( + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i16x8(4, 5, 3, 6, 2, 7, 1, 8); + + assert_eq!(r, __msa_ilvl_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvl_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(2, 3, 1, 4); + + assert_eq!(r, __msa_ilvl_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvl_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(2, 1); + let r = i64x2(1, 2); + + assert_eq!(r, __msa_ilvl_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvod_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = i8x16( + 16, 15, 14, 13, + 12, 11, 10, 9, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + 15, 2, 13, 4, + 11, 6, 9, 8, + 7, 10, 5, 12, + 3, 14, 1, 16 + ); + + assert_eq!(r, __msa_ilvod_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvod_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 5, 6, 7, 8 + ); + #[rustfmt::skip] + let b = i16x8( + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i16x8(7, 2, 5, 4, 3, 6, 1, 8); + + assert_eq!(r, __msa_ilvod_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvod_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(3, 2, 1, 4); + + assert_eq!(r, __msa_ilvod_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvod_d() { + #[rustfmt::skip] + let a = 
i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(2, 1); + let r = i64x2(1, 2); + + assert_eq!(r, __msa_ilvod_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvr_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = i8x16( + 16, 15, 14, 13, + 12, 11, 10, 9, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + 16, 1, 15, 2, + 14, 3, 13, 4, + 12, 5, 11, 6, + 10, 7, 9, 8 + ); + + assert_eq!(r, __msa_ilvr_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvr_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 5, 6, 7, 8, + ); + #[rustfmt::skip] + let b = i16x8( + 8, 7, 6, 5, + 4, 3, 2, 1, + ); + let r = i16x8(8, 1, 7, 2, 6, 3, 5, 4); + + assert_eq!(r, __msa_ilvr_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvr_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(4, 1, 3, 2); + + assert_eq!(r, __msa_ilvr_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ilvr_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(2, 1); + let r = i64x2(2, 1); + + assert_eq!(r, __msa_ilvr_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insert_b() { + #[rustfmt::skip] + let a = i8x16( + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127 + ); + let r = i8x16( + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127, + 5, 127, 4, 127 + ); + + assert_eq!(r, __msa_insert_b(a, 12, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insert_h() { + #[rustfmt::skip] + let a = i16x8( + 32767, 3276, 100, 11, + 32767, 3276, 100, 11 + ); + let r = i16x8( + 32767, 3276, 100, 11, + 5, 3276, 100, 11 + ); + + assert_eq!(r, __msa_insert_h(a, 4, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insert_w() { + #[rustfmt::skip] + let a = i32x4(100, 2147483647, 5, -2147483647); + let r = i32x4(100, 7, 5, -2147483647); + + assert_eq!(r, __msa_insert_w(a, 1, 7)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insert_d() { + #[rustfmt::skip] + let a = i64x2(3, i64::max_value()); + #[rustfmt::skip] + let r = i64x2(3, 100); + + assert_eq!(r, __msa_insert_d(a, 1, 100)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insve_b() { + #[rustfmt::skip] + let a = i8x16( + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127 + ); + let b = i8x16( + 5, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = i8x16( + -100, 127, 4, 127, + -100, 127, 4, 127, + -100, 127, 4, 127, + 5, 127, 4, 127 + ); + + assert_eq!(r, __msa_insve_b(a, 12, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insve_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 3276, 100, 11, + i16::max_value(), 3276, 100, 11 + ); + let b = i16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = i16x8( + 32767, 3276, 100, 11, + 1, 3276, 100, 11 + ); + + assert_eq!(r, __msa_insve_h(a, 4, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insve_w() { + #[rustfmt::skip] + let a = i32x4(100, 2147483647, 5, -2147483647); + let b = i32x4(1, 2, 3, 4); + let r = i32x4(100, 2147483647, 5, 1); + + assert_eq!(r, __msa_insve_w(a, 3, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_insve_d() { + #[rustfmt::skip] + let a = i64x2(3, i64::max_value()); + let b = i64x2(1, 2); + #[rustfmt::skip] + let r = i64x2(3, 1); + + assert_eq!(r, __msa_insve_d(a, 1, b)); + } + + 
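+ // The ld tests below pass a raw byte pointer plus an immediate byte offset;
+ // e.g. ld_b with offset 9 from &mut a[4] of an i8 array reads elements 13..=28.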
#[simd_test(enable = "msa")] + unsafe fn test_msa_ld_b() { + + let mut a : [i8; 32] = [ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ]; + let p = &mut a[4] as *mut _ as *mut i8; + + let r = i8x16( + 13, 14, 15, 16, + 17, 18, 19, 20, + 21, 22, 23, 24, + 25, 26, 27, 28 + ); + + assert_eq!(r, __msa_ld_b(p, 9)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ld_h() { + + let mut a : [i16; 16] = [ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 + ]; + let p = &mut a[4] as *mut _ as *mut i8; + + let r = i16x8(3, 4, 5, 6, 7, 8, 9, 10); + + assert_eq!(r, __msa_ld_h(p, -2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ld_w() { + + let mut a : [i32; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; + let p = &mut a[3] as *mut _ as *mut i8; + + let r = i32x4(2, 3, 4, 5); + + assert_eq!(r, __msa_ld_w(p, -4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ld_d() { + + let mut a : [i64; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; + let p = &mut a[4] as *mut _ as *mut i8; + + let r = i64x2(0, 1); + + assert_eq!(r, __msa_ld_d(p, -32)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ldi_b() { + let r = i8x16( + -20, -20, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20 + ); + + assert_eq!(r, __msa_ldi_b(-20)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ldi_h() { + let r = i16x8( + 255, 255, 255, 255, + 255, 255, 255, 255 + ); + + assert_eq!(r, __msa_ldi_h(255)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ldi_w() { + let r = i32x4(-509, -509, -509, -509); + + assert_eq!(r, __msa_ldi_w(-509)); + } + + // FIXME: https://reviews.llvm.org/D59884 + // If the target type is i64, a negative immediate loses its sign + // Test passes if 4294967185 is used instead of -111 in vector 'r' + // (4294967185 == 2^32 - 111, i.e. the 32-bit pattern of -111 zero-extended) + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_ldi_d() { + // let r = i64x2(-111, -111); + + // assert_eq!(r, __msa_ldi_d(-111)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_madd_q_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 1024, i16::min_value(), -1024, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 1024, 1024, 1024, 1024, + 1024, 1024, 1024, 1024 + ); + #[rustfmt::skip] + let c = i16x8( + i16::max_value(), i16::max_value(), 1, -1, + 33, 66, 99, 132 + ); + #[rustfmt::skip] + let r = i16x8(32767, 2047, -32768, -1025, 2, 4, 6, 8); + + assert_eq!(r, __msa_madd_q_h(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_madd_q_w() { + #[rustfmt::skip] + let a = i32x4(i32::max_value(), i32::min_value(), 1, 2); + #[rustfmt::skip] + let b = i32x4(102401, 102401, 102401, 102401); + #[rustfmt::skip] + let c = i32x4(10240, 20480, 30720, 40960); + #[rustfmt::skip] + let r = i32x4(2147483647, -2147483648, 2, 3); + + assert_eq!(r, __msa_madd_q_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maddr_q_h() { + #[rustfmt::skip] + let a = i16x8( + 32767, 1024, -32768, -1024, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 1024, 1024, 1024, 1024, + 1024, 1024, 1024, 1024 + ); + #[rustfmt::skip] + let c = i16x8( + 32767, 32767, 32767, 32767, + 33, 66, 99, 132 + ); + #[rustfmt::skip] + let r = i16x8(32767, 2048, -31744, 0, 2, 4, 6, 8); + + assert_eq!(r, __msa_maddr_q_h(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maddr_q_w() { + #[rustfmt::skip] + let a = i32x4(i32::max_value(), i32::min_value(), 1, 2); + #[rustfmt::skip] + let b = i32x4(102401, 102401, 102401, 102401); + #[rustfmt::skip] 
+ let c = i32x4(10240, 20480, 30720, 40960); + #[rustfmt::skip] + let r = i32x4(2147483647, -2147483647, 2, 4); + + assert_eq!(r, __msa_maddr_q_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maddv_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + 5, 6, 7, 8, + 5, 6, 7, 8, + 5, 6, 7, 8, + 5, 6, 7, 8 + ); + #[rustfmt::skip] + let c = i8x16( + 9, 10, 11, 12, + 9, 10, 11, 12, + 9, 10, 11, 12, + 9, 10, 11, 12 + ); + #[rustfmt::skip] + let r = i8x16( + 46, 62, 80, 100, + 46, 62, 80, 100, + 46, 62, 80, 100, + 46, 62, 80, 100 + ); + + assert_eq!(r, __msa_maddv_b(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maddv_h() { + #[rustfmt::skip] + let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = i16x8(5, 6, 7, 8, 5, 6, 7, 8); + #[rustfmt::skip] + let c = i16x8(9, 10, 11, 12, 9, 10, 11, 12); + #[rustfmt::skip] + let r = i16x8(46, 62, 80, 100, 46, 62, 80, 100); + + assert_eq!(r, __msa_maddv_h(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maddv_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 1, 2); + #[rustfmt::skip] + let b = i32x4(3, 4, 3, 4); + #[rustfmt::skip] + let c = i32x4(5, 6, 5, 6); + #[rustfmt::skip] + let r = i32x4(16, 26, 16, 26); + + assert_eq!(r, __msa_maddv_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maddv_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(3, 4); + #[rustfmt::skip] + let c = i64x2(5, 6); + #[rustfmt::skip] + let r = i64x2(16, 26); + + assert_eq!(r, __msa_maddv_d(a,b,c)); + } + + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_a_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + 6, 7, 8, 9, + -6, -7, -8, -9, + 6, 7, 8, 9 + ); + let r = i8x16( + -6, -7, -8, -9, + 6, 7, 8, 9, + -6, -7, -8, -9, + 6, 7, 8, 9 + ); + + assert_eq!(r, __msa_max_a_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_a_h() { + #[rustfmt::skip] + let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + let r = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + + assert_eq!(r, __msa_max_a_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_a_w() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(6, 7, 8, 9); + let r = i32x4(6, 7, 8, 9); + + assert_eq!(r, __msa_max_a_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_a_d() { + #[rustfmt::skip] + let a = i64x2(-1, 2); + #[rustfmt::skip] + let b = i64x2(6, -7); + let r = i64x2(6, -7); + + assert_eq!(r, __msa_max_a_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_s_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + 6, 7, 8, 9, + -6, -7, -8, -9, + 6, 7, 8, 9 + ); + let r = i8x16( + 1, 2, 3, 4, + 6, 7, 8, 9, + 1, 2, 3, 4, + 6, 7, 8, 9 + ); + + assert_eq!(r, __msa_max_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_s_h() { + #[rustfmt::skip] + let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + let r = i16x8(1, 7, 3, 9, 1, 7, 3, 9); + + assert_eq!(r, __msa_max_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_s_w() { + #[rustfmt::skip] + let a = 
i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(6, 7, 8, 9); + let r = i32x4(6, 7, 8, 9); + + assert_eq!(r, __msa_max_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, 2); + #[rustfmt::skip] + let b = i64x2(6, -7); + let r = i64x2(6, 2); + + assert_eq!(r, __msa_max_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + + assert_eq!(r, __msa_max_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + + assert_eq!(r, __msa_max_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(6, 7, 8, 9); + + assert_eq!(r, __msa_max_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_max_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(6, 7); + + assert_eq!(r, __msa_max_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_s_b() { + #[rustfmt::skip] + let a = i8x16( + 1, -20, -6, 8, + 1, -20, -6, 8, + 1, -20, -6, 8, + 1, -20, -6, 8 + ); + + let r = i8x16( + 1, -16, -6, 8, + 1, -16, -6, 8, + 1, -16, -6, 8, + 1, -16, -6, 8 + ); + + assert_eq!(r, __msa_maxi_s_b(a, -16)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_s_h() { + #[rustfmt::skip] + let a = i16x8(1, 3, -60, -8, 1, 3, -6, -8); + let r = i16x8(15, 15, 15, 15, 15, 15, 15, 15); + + assert_eq!(r, __msa_maxi_s_h(a, 15)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_s_w() { + #[rustfmt::skip] + let a = i32x4(1, 3, -6, -8); + let r = i32x4(1, 3, -5, -5); + + assert_eq!(r, __msa_maxi_s_w(a, -5)); + } + + // FIXME: https://reviews.llvm.org/D59884 + // If the target type is i64, a negative immediate loses its sign + // Test passes if 4294967293 is used instead of -3 in vector 'r' + // (4294967293 == 2^32 - 3, i.e. the 32-bit pattern of -3 zero-extended) + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_maxi_s_d() { + // #[rustfmt::skip] + // let a = i64x2(1, -8); + + // let r = i64x2(-3, -3); + + // assert_eq!(r, __msa_maxi_s_d(a, -3)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 3, 6, 8, + 1, 3, 6, 8, + 1, 3, 6, 8, + 1, 3, 6, 8 + ); + + let r = u8x16( + 5, 5, 6, 8, + 5, 5, 6, 8, + 5, 5, 6, 8, + 5, 5, 6, 8 + ); + + assert_eq!(r, __msa_maxi_u_b(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 3, 6, 8, 1, 3, 6, 8); + let r = u16x8(5, 5, 6, 8, 5, 5, 6, 8); + + assert_eq!(r, __msa_maxi_u_h(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 3, 6, 8); + let r = u32x4(5, 5, 6, 8); + + assert_eq!(r, __msa_maxi_u_w(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_maxi_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 8); + let r = u64x2(5, 8); + + assert_eq!(r, __msa_maxi_u_d(a, 5)); + } + + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_a_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, 
+ -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + 6, 7, 8, 9, + -6, -7, -8, -9, + 6, 7, 8, 9 + ); + let r = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + + assert_eq!(r, __msa_min_a_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_a_h() { + #[rustfmt::skip] + let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + let r = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + + assert_eq!(r, __msa_min_a_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_a_w() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(6, 7, 8, 9); + let r = i32x4(1, -2, 3, -4); + + assert_eq!(r, __msa_min_a_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_a_d() { + #[rustfmt::skip] + let a = i64x2(-1, 2); + #[rustfmt::skip] + let b = i64x2(6, -7); + let r = i64x2(-1, 2); + + assert_eq!(r, __msa_min_a_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_s_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + 6, 7, 8, 9, + -6, -7, -8, -9, + 6, 7, 8, 9 + ); + let r = i8x16( + -6, -7, -8, -9, + -1, -2, -3, -4, + -6, -7, -8, -9, + -1, -2, -3, -4 + ); + + assert_eq!(r, __msa_min_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_s_h() { + #[rustfmt::skip] + let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + let r = i16x8(-6, -2, -8, -4, -6, -2, -8, -4); + + assert_eq!(r, __msa_min_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_s_w() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4(6, 7, 8, 9); + let r = i32x4(1, -2, 3, -4); + + assert_eq!(r, __msa_min_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_s_d() { + #[rustfmt::skip] + let a = i64x2(-1, 2); + #[rustfmt::skip] + let b = i64x2(6, -7); + let r = i64x2(-1, -7); + + assert_eq!(r, __msa_min_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_s_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + let r = i8x16( + -10, -10, -10, -10, + -10, -10, -10, -10, + -10, -10, -10, -10, + -10, -10, -10, -10 + ); + + assert_eq!(r, __msa_mini_s_b(a, -10)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_s_h() { + #[rustfmt::skip] + let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let r = i16x8(-3, -3, -3, -4, -3, -3, -3, -4); + + assert_eq!(r, __msa_mini_s_h(a, -3)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_s_w() { + #[rustfmt::skip] + let a = i32x4(1, -2, 3, -4); + let r = i32x4(-3, -3, -3, -4); + + assert_eq!(r, __msa_mini_s_w(a, -3)); + } + + // FIXME: https://reviews.llvm.org/D59884 + // If target type is i64, negative immediate loses the sign + // -3 is represented as 4294967293 + // #[simd_test(enable = "msa")] + // unsafe fn test_msa_mini_s_d() { + // #[rustfmt::skip] + // let a = i64x2(-3, 2); + // let r = i64x2(-1, -3); + + // assert_eq!(r, __msa_mini_s_d(a, -3)); + // } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = 
u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + + assert_eq!(r, __msa_min_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(1, 2, 3, 4, 1, 2, 3, 4,); + + assert_eq!(r, __msa_min_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(1, 2, 3, 4,); + + assert_eq!(r, __msa_min_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_min_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(1, 2,); + + assert_eq!(r, __msa_min_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_u_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 3, 6, 8, + 1, 3, 6, 8, + 1, 3, 6, 8, + 1, 3, 6, 8 + ); + + let r = u8x16( + 1, 3, 5, 5, + 1, 3, 5, 5, + 1, 3, 5, 5, + 1, 3, 5, 5 + ); + + assert_eq!(r, __msa_mini_u_b(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_u_h() { + #[rustfmt::skip] + let a = u16x8(1, 3, 6, 8, 1, 3, 6, 8); + let r = u16x8(1, 3, 5, 5, 1, 3, 5, 5); + + assert_eq!(r, __msa_mini_u_h(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_u_w() { + #[rustfmt::skip] + let a = u32x4(1, 3, 6, 8); + let r = u32x4(1, 3, 5, 5); + + assert_eq!(r, __msa_mini_u_w(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mini_u_d() { + #[rustfmt::skip] + let a = u64x2(1, 8); + let r = u64x2(1, 5); + + assert_eq!(r, __msa_mini_u_d(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_s_b() { + #[rustfmt::skip] + let a = i8x16( + -6, -7, -8, -9, + 6, 7, 8, 9, + -6, -7, -8, -9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let b = i8x16( + 1, 2, 3, 4, + -1, -2, -3, -4, + 1, 2, 3, 4, + -1, -2, -3, -4 + ); + let r = i8x16( + 0, -1, -2, -1, + 0, 1, 2, 1, + 0, -1, -2, -1, + 0, 1, 2, 1 + ); + + assert_eq!(r, __msa_mod_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_s_h() { + #[rustfmt::skip] + let a = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + #[rustfmt::skip] + let b = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let r = i16x8(0, 1, -2, 1, 0, 1, -2, 1); + + assert_eq!(r, __msa_mod_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_s_w() { + #[rustfmt::skip] + let a = i32x4(6, 7, 8, 9); + #[rustfmt::skip] + let b = i32x4(1, -2, 3, -4); + let r = i32x4(0, 1, 2, 1); + + assert_eq!(r, __msa_mod_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_s_d() { + #[rustfmt::skip] + let a = i64x2(6, -7); + #[rustfmt::skip] + let b = i64x2(-1, 2); + let r = i64x2(0, -1); + + assert_eq!(r, __msa_mod_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_u_b() { + #[rustfmt::skip] + let a = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + #[rustfmt::skip] + let b = u8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = u8x16( + 0, 1, 2, 1, + 0, 1, 2, 1, + 0, 1, 2, 1, + 0, 1, 2, 1 + ); + + assert_eq!(r, __msa_mod_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_u_h() { + #[rustfmt::skip] + let a = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let b = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let r = u16x8(0, 1, 2, 1, 0, 1, 2, 1); + + assert_eq!(r, __msa_mod_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn 
test_msa_mod_u_w() { + #[rustfmt::skip] + let a = u32x4(6, 7, 8, 9); + #[rustfmt::skip] + let b = u32x4(1, 2, 3, 4); + let r = u32x4(0, 1, 2, 1); + + assert_eq!(r, __msa_mod_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mod_u_d() { + #[rustfmt::skip] + let a = u64x2(6, 7); + #[rustfmt::skip] + let b = u64x2(1, 2); + let r = u64x2(0, 1); + + assert_eq!(r, __msa_mod_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_move_v() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 1, 2, 3, 4, + 5, 6, 7, 8 + ); + #[rustfmt::skip] + let r = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 1, 2, 3, 4, + 5, 6, 7, 8 + ); + + assert_eq!(r, __msa_move_v(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msub_q_h() { + #[rustfmt::skip] + let a = i16x8( + 1024, -1024, 1024, -1024, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 1025, 1025, 1025, 1025, + 1025, 1025, 1025, 1025 + ); + #[rustfmt::skip] + let c = i16x8( + 1024, 2048, 3072, 4096, + 1024, 2048, 3072, 4096 + ); + #[rustfmt::skip] + let r = i16x8(991, -1089, 927, -1153, -32, -63, -94, -125); + + assert_eq!(r, __msa_msub_q_h(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msub_q_w() { + #[rustfmt::skip] + let a = i32x4(2147483647, -2147483647, 1, 2); + #[rustfmt::skip] + let b = i32x4(10240, 10240, 10240, 10240); + #[rustfmt::skip] + let c = i32x4(10240, 20480, 30720, 40960); + #[rustfmt::skip] + let r = i32x4(2147483646, -2147483648, 0, 1); + + assert_eq!(r, __msa_msub_q_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msubr_q_h() { + #[rustfmt::skip] + let a = i16x8( + 1024, -1024, 1024, -1024, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 1025, 1025, 1025, 1025, + 1025, 1025, 1025, 1025 + ); + #[rustfmt::skip] + let c = i16x8( + 1024, 2048, 3072, 4096, + 1024, 2048, 3072, 4096 + ); + #[rustfmt::skip] + let r = i16x8(992, -1088, 928, -1152, -31, -62, -93, -124); + + assert_eq!(r, __msa_msubr_q_h(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msubr_q_w() { + #[rustfmt::skip] + let a = i32x4(i32::max_value(), -2147483647, 1, 2); + let b = i32x4(10240, 10240, 10240, 10240); + let c = i32x4(10240, 20480, 30720, 40960); + #[rustfmt::skip] + let r = i32x4(2147483647, -2147483647, 1, 2); + + assert_eq!(r, __msa_msubr_q_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msubv_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let b = i8x16( + 5, 6, 7, 8, + 5, 6, 7, 8, + 5, 6, 7, 8, + 5, 6, 7, 8 + ); + let c = i8x16( + 9, 10, 11, 12, + 9, 10, 11, 12, + 9, 10, 11, 12, + 9, 10, 11, 12 + ); + #[rustfmt::skip] + let r = i8x16( + -44, -58, -74, -92, + -44, -58, -74, -92, + -44, -58, -74, -92, + -44, -58, -74, -92 + ); + + assert_eq!(r, __msa_msubv_b(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msubv_h() { + #[rustfmt::skip] + let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + let b = i16x8(5, 6, 7, 8, 5, 6, 7, 8); + let c = i16x8(9, 10, 11, 12, 9, 10, 11, 12); + #[rustfmt::skip] + let r = i16x8(-44, -58, -74, -92, -44, -58, -74, -92); + + assert_eq!(r, __msa_msubv_h(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msubv_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 1, 2); + let b = i32x4(3, 4, 3, 4); + let c = i32x4(5, 6, 5, 6); + #[rustfmt::skip] + let r = i32x4(-14, -22, -14, -22); + + assert_eq!(r, __msa_msubv_w(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_msubv_d() { + #[rustfmt::skip] + let a = 
i64x2(1, 2); + let b = i64x2(3, 4); + let c = i64x2(5, 6); + #[rustfmt::skip] + let r = i64x2(-14, -22); + + assert_eq!(r, __msa_msubv_d(a,b,c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mul_q_h() { + #[rustfmt::skip] + let a = i16x8( + 12500, -20, -300, 400, + 12500, 20, 300, 400 + ); + let b = i16x8( + 1250, 10240, -7585, 8456, + 1250, 10240, -7585, 8456 + ); + #[rustfmt::skip] + let r = i16x8(476, -7, 69, 103, 476, 6, -70, 103); + + assert_eq!(r, __msa_mul_q_h(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mul_q_w() { + #[rustfmt::skip] + let a = i32x4( + i32::max_value(), i32::max_value(), + i32::min_value(), i32::min_value() + ); + let b = i32x4(30, 60, 30, 60); + #[rustfmt::skip] + let r = i32x4(29, 59, -30, -60); + + assert_eq!(r, __msa_mul_q_w(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mulr_q_h() { + #[rustfmt::skip] + let a = i16x8( + 12500, -20, -300, 400, + 12500, 20, 300, 400 + ); + let b = i16x8( + 1250, 10240, -7585, 8456, + 1250, 10240, -7585, 8456 + ); + #[rustfmt::skip] + let r = i16x8(477, -6, 69, 103, 477, 6, -69, 103); + + assert_eq!(r, __msa_mulr_q_h(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mulr_q_w() { + #[rustfmt::skip] + let a = i32x4( + i32::max_value(), i32::max_value(), + i32::min_value(), i32::min_value() + ); + let b = i32x4(30, 60, 30, 60); + #[rustfmt::skip] + let r = i32x4(30, 60, -30, -60); + + assert_eq!(r, __msa_mulr_q_w(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mulv_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = i8x16( + 16, 15, 14, 13, + 12, 11, 10, 9, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + 16, 30, 42, 52, + 60, 66, 70, 72, + 72, 70, 66, 60, + 52, 42, 30, 16 + ); + + assert_eq!(r, __msa_mulv_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mulv_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 5, 6, 7, 8 + ); + #[rustfmt::skip] + let b = i16x8( + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i16x8(8, 14, 18, 20, 20, 18, 14, 8); + + assert_eq!(r, __msa_mulv_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mulv_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(4, 6, 6, 4); + + assert_eq!(r, __msa_mulv_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_mulv_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(2, 1); + let r = i64x2(2, 2); + + assert_eq!(r, __msa_mulv_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nloc_b() { + #[rustfmt::skip] + let a = i8x16( + -128, -64, -32, -16, + -8, -4, -2, -1, + 1, 2, 4, 8, + 16, 32, 64, 127 + ); + let r = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 0, 0, 0, 0, + 0, 0, 0, 0 + ); + + assert_eq!(r, __msa_nloc_b(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nloc_h() { + #[rustfmt::skip] + let a = i16x8( + -32768, -16384, -8192, -4096, + 4096, 8192, 16384, 32767 + ); + let r = i16x8(1, 2, 3, 4, 0, 0, 0, 0); + + assert_eq!(r, __msa_nloc_h(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nloc_w() { + #[rustfmt::skip] + let a = i32x4( + i32::min_value(), -1073741824, + 1073741824, i32::max_value() + ); + let r = i32x4(1, 2, 0, 0); + + assert_eq!(r, __msa_nloc_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nloc_d() { + #[rustfmt::skip] + let a = i64x2(i64::min_value(), i64::max_value()); + let r = i64x2(1, 
0); + + assert_eq!(r, __msa_nloc_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nlzc_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + let r = i8x16( + 7, 6, 6, 5, + 5, 5, 5, 4, + 4, 4, 4, 4, + 4, 4, 4, 3 + ); + + assert_eq!(r, __msa_nlzc_b(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nlzc_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 5, 6, 7, 8 + ); + let r = i16x8(15, 14, 14, 13, 13, 13, 13, 12); + + assert_eq!(r, __msa_nlzc_h(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nlzc_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + let r = i32x4(31, 30, 30, 29); + + assert_eq!(r, __msa_nlzc_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nlzc_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + let r = i64x2(63, 62); + + assert_eq!(r, __msa_nlzc_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nor_v() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + let r = u8x16( + 254, 253, 252, 251, + 250, 249, 248, 247, + 246, 245, 244, 243, + 242, 241, 240, 239 + ); + + assert_eq!(r, __msa_nor_v(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_nori_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + let r = u8x16( + 250, 249, 248, 251, + 250, 249, 248, 243, + 242, 241, 240, 243, + 242, 241, 240, 235 + ); + + assert_eq!(r, __msa_nori_b(a, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_or_v() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + let r = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + + assert_eq!(r, __msa_or_v(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_ori_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + let r = u8x16( + 5, 6, 7, 4, + 5, 6, 7, 12, + 13, 14, 15, 12, + 13, 14, 15, 20 + ); + + assert_eq!(r, __msa_ori_b(a, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckev_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i8x16( + 4, 2, 4, 2, + 4, 2, 4, 2, + 1, 3, 1, 3, + 1, 3, 1, 3 + ); + + assert_eq!(r, __msa_pckev_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckev_h() { + #[rustfmt::skip] + let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = i16x8(4, 3, 2, 1, 4, 3, 2, 1); + let r = i16x8(4, 2, 4, 2, 1, 3, 1, 3); + + assert_eq!(r, __msa_pckev_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckev_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(4, 2, 1, 3); + + assert_eq!(r, __msa_pckev_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckev_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(4, 3); + let r = i64x2(4, 1); + + assert_eq!(r, __msa_pckev_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckod_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 
4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i8x16( + 3, 1, 3, 1, + 3, 1, 3, 1, + 2, 4, 2, 4, + 2, 4, 2, 4 + ); + + assert_eq!(r, __msa_pckod_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckod_h() { + #[rustfmt::skip] + let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = i16x8(4, 3, 2, 1, 4, 3, 2, 1); + let r = i16x8(3, 1, 3, 1, 2, 4, 2, 4); + + assert_eq!(r, __msa_pckod_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckod_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(3, 1, 2, 4); + + assert_eq!(r, __msa_pckod_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pckod_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(4, 3); + let r = i64x2(3, 2); + + assert_eq!(r, __msa_pckod_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pcnt_b() { + #[rustfmt::skip] + let a = i8x16( + -128, -64, -32, -16, + -8, -4, -2, -1, + 1, 2, 4, 8, + 16, 32, 64, 127 + ); + let r = i8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 1, 1, 1, 1, + 1, 1, 1, 7 + ); + + assert_eq!(r, __msa_pcnt_b(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pcnt_h() { + #[rustfmt::skip] + let a = i16x8( + -32768, -16384, -8192, -4096, + 4096, 8192, 16384, 32767 + ); + let r = i16x8(1, 2, 3, 4, 1, 1, 1, 15); + + assert_eq!(r, __msa_pcnt_h(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pcnt_w() { + #[rustfmt::skip] + let a = i32x4( + i32::min_value(), -1073741824, + 1073741824, i32::max_value() + ); + let r = i32x4(1, 2, 1, 31); + + assert_eq!(r, __msa_pcnt_w(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_pcnt_d() { + #[rustfmt::skip] + let a = i64x2(-2147483648, 2147483647); + let r = i64x2(33, 31); + + assert_eq!(r, __msa_pcnt_d(a)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_s_b() { + #[rustfmt::skip] + let a = i8x16( + i8::max_value(), 105, 30, 1, + i8::max_value(), 105, 30, 1, + i8::max_value(), 105, 30, 1, + i8::max_value(), 105, 30, 1 + ); + let r = i8x16( + 3, 3, 3, 1, + 3, 3, 3, 1, + 3, 3, 3, 1, + 3, 3, 3, 1 + ); + + assert_eq!(r, __msa_sat_s_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_s_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 1155, 155, 1, + i16::max_value(), 1155, 155, 1 + ); + let r = i16x8(127, 127, 127, 1, 127, 127, 127, 1); + + assert_eq!(r, __msa_sat_s_h(a, 7)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_s_w() { + #[rustfmt::skip] + let a = i32x4(i32::max_value(), 111111155, i32::max_value(), 1); + let r = i32x4(131071, 131071, 131071, 1); + + assert_eq!(r, __msa_sat_s_w(a, 17)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_s_d() { + #[rustfmt::skip] + let a = i64x2(i64::max_value(), 1); + let r = i64x2(137438953471, 1); + + assert_eq!(r, __msa_sat_s_d(a, 37)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_u_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 105, 30, 1, + u8::max_value(), 105, 30, 1, + u8::max_value(), 105, 30, 1, + u8::max_value(), 105, 30, 1 + ); + let r = u8x16( + 7, 7, 7, 1, + 7, 7, 7, 1, + 7, 7, 7, 1, + 7, 7, 7, 1 + ); + + assert_eq!(r, __msa_sat_u_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_u_h() { + #[rustfmt::skip] + let a = u16x8( + u16::max_value(), 1155, 155, 1, + u16::max_value(), 1155, 155, 1 + ); + 
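+ // sat_u_h with n == 7 saturates each lane to (1 << 8) - 1 == 255.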
let r = u16x8(255, 255, 155, 1, 255, 255, 155, 1); + + assert_eq!(r, __msa_sat_u_h(a, 7)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_u_w() { + #[rustfmt::skip] + let a = u32x4(u32::max_value(), 111111155, u32::max_value(), 1); + let r = u32x4(262143, 262143, 262143, 1); + + assert_eq!(r, __msa_sat_u_w(a, 17)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sat_u_d() { + #[rustfmt::skip] + let a = u64x2(u64::max_value(), 1); + let r = u64x2(274877906943, 1); + + assert_eq!(r, __msa_sat_u_d(a, 37)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_shf_b() { + #[rustfmt::skip] + let a = i8x16( + 11, 12, 3, 4, + 11, 12, 3, 4, + 11, 12, 3, 4, + 11, 12, 3, 4 + ); + + let r = i8x16( + 11, 3, 4, 12, + 11, 3, 4, 12, + 11, 3, 4, 12, + 11, 3, 4, 12 + ); + + assert_eq!(r, __msa_shf_b(a, 120)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_shf_h() { + #[rustfmt::skip] + let a = i16x8( + 11, 12, 13, 14, + 11, 12, 13, 14 + ); + + let r = i16x8(11, 14, 12, 13, 11, 14, 12, 13); + + assert_eq!(r, __msa_shf_h(a, 156)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_shf_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + + let r = i32x4(1, 3, 2, 4); + + assert_eq!(r, __msa_shf_w(a, 216)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sld_b() { + #[rustfmt::skip] + let a = i8x16( + 0, 1, 2, 3, + 4, 5, 6, 7, + 8, 9, 10, 11, + 12, 13, 14, 15 + ); + #[rustfmt::skip] + let b = i8x16( + 16, 17, 18, 19, + 20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31 + ); + let r = i8x16( + 21, 22, 23, 24, + 25, 26, 27, 28, + 29, 30, 31, 0, + 1, 2, 3, 4 + ); + + assert_eq!(r, __msa_sld_b(a, b, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sld_h() { + #[rustfmt::skip] + let a = i16x8(0, 1, 2, 3, 4, 5, 6, 7); + #[rustfmt::skip] + let b = i16x8(8, 9, 10, 11, 12, 13, 14, 15); + let r = i16x8(9, 10, 11, 0, 13, 14, 15, 4); + + assert_eq!(r, __msa_sld_h(a, b, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sld_w() { + #[rustfmt::skip] + let a = i32x4(0, 1, 2, 3); + #[rustfmt::skip] + let b = i32x4(4, 5, 6, 7); + + let r = i32x4(4, 5, 6, 7); + + assert_eq!(r, __msa_sld_w(a, b, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sld_d() { + #[rustfmt::skip] + let a = i64x2(0, 1); + #[rustfmt::skip] + let b = i64x2(2, 3); + + let r = i64x2(2, 3); + + assert_eq!(r, __msa_sld_d(a, b, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sldi_b() { + #[rustfmt::skip] + let a = i8x16( + 0, 1, 2, 3, + 4, 5, 6, 7, + 8, 9, 10, 11, + 12, 13, 14, 15 + ); + #[rustfmt::skip] + let b = i8x16( + 16, 17, 18, 19, + 20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31 + ); + let r = i8x16( + 21, 22, 23, 24, + 25, 26, 27, 28, + 29, 30, 31, 0, + 1, 2, 3, 4 + ); + + assert_eq!(r, __msa_sldi_b(a, b, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sldi_h() { + #[rustfmt::skip] + let a = i16x8(0, 1, 2, 3, 4, 5, 6, 7); + #[rustfmt::skip] + let b = i16x8(8, 9, 10, 11, 12, 13, 14, 15); + let r = i16x8(9, 10, 11, 0, 13, 14, 15, 4); + + assert_eq!(r, __msa_sldi_h(a, b, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sldi_w() { + #[rustfmt::skip] + let a = i32x4(0, 1, 2, 3); + #[rustfmt::skip] + let b = i32x4(4, 5, 6, 7); + + let r = i32x4(4, 5, 6, 7); + + assert_eq!(r, __msa_sldi_w(a, b, 4)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sldi_d() { + #[rustfmt::skip] + let a = i64x2(0, 1); + #[rustfmt::skip] + let b = i64x2(2, 3); + + let r = i64x2(2, 
3); + + assert_eq!(r, __msa_sldi_d(a, b, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sll_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i8x16( + 16, 16, 12, 8, + 16, 16, 12, 8, + 16, 16, 12, 8, + 16, 16, 12, 8 + ); + + assert_eq!(r, __msa_sll_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sll_h() { + #[rustfmt::skip] + let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = i16x8(4, 3, 2, 1, 4, 3, 2, 1); + let r = i16x8(16, 16, 12, 8, 16, 16, 12, 8); + + assert_eq!(r, __msa_sll_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sll_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + let r = i32x4(16, 16, 12, 8); + + assert_eq!(r, __msa_sll_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sll_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(4, 3); + let r = i64x2(16, 16); + + assert_eq!(r, __msa_sll_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_slli_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = i8x16( + 4, 8, 12, 16, + 4, 8, 12, 16, + 4, 8, 12, 16, + 4, 8, 12, 16 + ); + + assert_eq!(r, __msa_slli_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_slli_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = i16x8(4, 8, 12, 16, 4, 8, 12, 16); + + assert_eq!(r, __msa_slli_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_slli_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + let r = i32x4(4, 8, 12, 16); + + assert_eq!(r, __msa_slli_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_slli_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + let r = i64x2(2, 4); + + assert_eq!(r, __msa_slli_d(a, 1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splat_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + + let r = i8x16( + 4, 4, 4, 4, + 4, 4, 4, 4, + 4, 4, 4, 4, + 4, 4, 4, 4 + ); + + assert_eq!(r, __msa_splat_b(a, 3)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splat_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 1, 2, 3, 4, + ); + + let r = i16x8(4, 4, 4, 4, 4, 4, 4, 4); + + assert_eq!(r, __msa_splat_h(a, 3)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splat_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + + let r = i32x4(4, 4, 4, 4); + + assert_eq!(r, __msa_splat_w(a, 3)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splat_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + + let r = i64x2(2, 2); + + assert_eq!(r, __msa_splat_d(a, 3)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splati_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + let r = i8x16( + 3, 3, 3, 3, + 3, 3, 3, 3, + 3, 3, 3, 3, + 3, 3, 3, 3 + ); + + assert_eq!(r, __msa_splati_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splati_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 1, 2, 3, 4, + ); + let r = i16x8(3, 3, 3, 3, 3, 3, 3, 3); + + assert_eq!(r, __msa_splati_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splati_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + let r = i32x4(3, 3, 3, 3); + 
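+ // splati_w broadcasts the lane selected by the immediate: lane 2 of a, value 3.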
+ assert_eq!(r, __msa_splati_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_splati_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + let r = i64x2(2, 2); + + assert_eq!(r, __msa_splati_d(a, 1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sra_b() { + #[rustfmt::skip] + let a = i8x16( + -128, -64, -32, -16, + -8, -4, -2, -1, + 1, 2, 4, 8, + 16, 32, 64, 127 + ); + #[rustfmt::skip] + let b = i8x16( + 8, 7, 6, 5, + 4, 3, 2, 1, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + -128, -1, -1, -1, + -1, -1, -1, -1, + 1, 0, 0, 0, + 1, 4, 16, 63 + ); + + assert_eq!(r, __msa_sra_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sra_h() { + #[rustfmt::skip] + let a = i16x8( + -32768, -16384, -8192, -4096, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 15, 14, 13, 12, + 12, 13, 14, 15 + ); + let r = i16x8( + -1, -1, -1, -1, + 0, 0, 0, 0 + ); + + assert_eq!(r, __msa_sra_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sra_w() { + #[rustfmt::skip] + let a = i32x4(i32::min_value(), -1073741824, 1, 2); + #[rustfmt::skip] + let b = i32x4(16, 15, 16, 15); + let r = i32x4(-32768, -32768, 0, 0); + + assert_eq!(r, __msa_sra_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_sra_d() { + #[rustfmt::skip] + let a = i64x2(i64::min_value(), i64::max_value()); + #[rustfmt::skip] + let b = i64x2(32, 31); + let r = i64x2(-2147483648, 4294967295); + + assert_eq!(r, __msa_sra_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srai_b() { + #[rustfmt::skip] + let a = i8x16( + i8::max_value(), 125, 55, 1, + i8::max_value(), 125, 55, 1, + i8::max_value(), 125, 55, 1, + i8::max_value(), 125, 55, 1 + ); + let r = i8x16( + 31, 31, 13, 0, + 31, 31, 13, 0, + 31, 31, 13, 0, + 31, 31, 13, 0 + ); + + assert_eq!(r, __msa_srai_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srai_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 125, 55, 1, + i16::max_value(), 125, 55, 1 + ); + let r = i16x8(8191, 31, 13, 0, 8191, 31, 13, 0); + + assert_eq!(r, __msa_srai_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srai_w() { + #[rustfmt::skip] + let a = i32x4(i32::max_value(), 125, 55, 1); + let r = i32x4(536870911, 31, 13, 0); + + assert_eq!(r, __msa_srai_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srai_d() { + #[rustfmt::skip] + let a = i64x2(i64::max_value(), 55); + let r = i64x2(2305843009213693951, 13); + + assert_eq!(r, __msa_srai_d(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srar_b() { + #[rustfmt::skip] + let a = i8x16( + -128, -64, -32, -16, + -8, -4, -2, -1, + 1, 2, 4, 8, + 16, 32, 64, 127 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + -8, -8, -8, -8, + 0, 0, 0, 0, + 1, 0, 0, 0, + 1, 4, 16, 64 + ); + + assert_eq!(r, __msa_srar_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srar_h() { + #[rustfmt::skip] + let a = i16x8( + i16::min_value(), -16384, -8192, -4096, + 150, 50, 25, 15 + ); + #[rustfmt::skip] + let b = i16x8( + 4, 3, 2, 1, + 1, 2, 3, 4 + ); + let r = i16x8(-2048, -2048, -2048, -2048, 75, 13, 3, 1); + + assert_eq!(r, __msa_srar_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srar_w() { + #[rustfmt::skip] + let a = i32x4(i32::min_value(), -1073741824, 100, 50); + #[rustfmt::skip] + let b = i32x4(16, 15, 1, 2); + let r = i32x4(-32768, -32768, 50, 13); + + assert_eq!(r, __msa_srar_w(a, b)); + } + + 
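+ // Like srar_b/h/w above, srar_d rounds to nearest by effectively adding
+ // 1 << (t - 1) before the arithmetic shift (for t > 0), so here
+ // i64::max_value() >> 31 rounds up to 4294967296.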
#[simd_test(enable = "msa")] + unsafe fn test_msa_srar_d() { + #[rustfmt::skip] + let a = i64x2(i64::min_value(), i64::max_value()); + #[rustfmt::skip] + let b = i64x2(32, 31); + let r = i64x2(-2147483648, 4294967296); + + assert_eq!(r, __msa_srar_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srari_b() { + #[rustfmt::skip] + let a = i8x16( + 125, i8::max_value(), 55, 1, + 125, i8::max_value(), 55, 1, + 125, i8::max_value(), 55, 1, + 125, i8::max_value(), 55, 1 + ); + let r = i8x16( + 31, 32, 14, 0, + 31, 32, 14, 0, + 31, 32, 14, 0, + 31, 32, 14, 0 + ); + + assert_eq!(r, __msa_srari_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srari_h() { + #[rustfmt::skip] + let a = i16x8(2155, 1155, 155, 1, 2155, 1155, 155, 1); + let r = i16x8(539, 289, 39, 0, 539, 289, 39, 0); + + assert_eq!(r, __msa_srari_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srari_w() { + #[rustfmt::skip] + let a = i32x4(211111155, 111111155, 11111155, 1); + let r = i32x4(52777789, 27777789, 2777789, 0); + + assert_eq!(r, __msa_srari_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srari_d() { + #[rustfmt::skip] + let a = i64x2(211111111155, 111111111155); + let r = i64x2(52777777789, 27777777789); + + assert_eq!(r, __msa_srari_d(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srl_b() { + #[rustfmt::skip] + let a = i8x16( + -128, -64, -32, -16, + -8, -4, -2, -1, + 1, 2, 4, 8, + 16, 32, 64, 127 + ); + #[rustfmt::skip] + let b = i8x16( + 8, 7, 6, 5, + 4, 3, 2, 1, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + -128, 1, 3, 7, + 15, 31, 63, 127, + 1, 0, 0, 0, + 1, 4, 16, 63 + ); + + assert_eq!(r, __msa_srl_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srl_h() { + #[rustfmt::skip] + let a = i16x8( + -32768, -16384, -8192, -4096, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 15, 14, 13, 12, + 4, 3, 2, 1 + ); + let r = i16x8(1, 3, 7, 15, 0, 0, 0, 2); + + assert_eq!(r, __msa_srl_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srl_w() { + #[rustfmt::skip] + let a = i32x4(i32::min_value(), -1073741824, 1, 2); + #[rustfmt::skip] + let b = i32x4(16, 15, 16, 15); + let r = i32x4(32768, 98304, 0, 0); + + assert_eq!(r, __msa_srl_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srl_d() { + #[rustfmt::skip] + let a = i64x2(i64::min_value(), i64::max_value()); + #[rustfmt::skip] + let b = i64x2(32, 31); + let r = i64x2(2147483648, 4294967295); + + assert_eq!(r, __msa_srl_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srli_b() { + #[rustfmt::skip] + let a = i8x16( + 25, 50, 100, 127, + 25, 50, 100, 127, + 25, 50, 100, 127, + 25, 50, 100, 127 + ); + let r = i8x16( + 6, 12, 25, 31, + 6, 12, 25, 31, + 6, 12, 25, 31, + 6, 12, 25, 31 + ); + + assert_eq!(r, __msa_srli_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srli_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 3276, 100, 127, + i16::max_value(), 3276, 100, 127 + ); + let r = i16x8( + 8191, 819, 25, 31, + 8191, 819, 25, 31 + ); + + assert_eq!(r, __msa_srli_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srli_w() { + #[rustfmt::skip] + let a = i32x4(100, i32::max_value(), 100, i32::max_value()); + let r = i32x4(25, 536870911, 25, 536870911); + + assert_eq!(r, __msa_srli_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srli_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::max_value()); + #[rustfmt::skip] + let r = i64x2(50, 
4611686018427387903); + + assert_eq!(r, __msa_srli_d(a, 1)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlr_b() { + #[rustfmt::skip] + let a = i8x16( + -128, -64, -32, -16, + -8, -4, -2, -1, + 1, 2, 4, 8, + 16, 32, 64, 127 + ); + #[rustfmt::skip] + let b = i8x16( + 8, 7, 6, 5, + 4, 3, 2, 1, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = i8x16( + -128, 2, 4, 8, + 16, 32, 64, -128, + 1, 0, 0, 0, + 1, 4, 16, 64 + ); + + assert_eq!(r, __msa_srlr_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlr_h() { + #[rustfmt::skip] + let a = i16x8( + -32768, -16384, -8192, -4096, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 15, 14, 13, 12, + 4, 3, 2, 1 + ); + let r = i16x8(1, 3, 7, 15, 0, 0, 1, 2); + + assert_eq!(r, __msa_srlr_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlr_w() { + #[rustfmt::skip] + let a = i32x4(i32::min_value(), -1073741824, 1, 2); + #[rustfmt::skip] + let b = i32x4(16, 15, 16, 15); + let r = i32x4(32768, 98304, 0, 0); + + assert_eq!(r, __msa_srlr_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlr_d() { + #[rustfmt::skip] + let a = i64x2(i64::min_value(), i64::max_value()); + #[rustfmt::skip] + let b = i64x2(32, 31); + let r = i64x2(2147483648, 4294967296); + + assert_eq!(r, __msa_srlr_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlri_b() { + #[rustfmt::skip] + let a = i8x16( + 25, 50, 100, i8::max_value(), + 25, 50, 100, i8::max_value(), + 25, 50, 100, i8::max_value(), + 25, 50, 100, i8::max_value() + ); + let r = i8x16( + 6, 13, 25, 32, + 6, 13, 25, 32, + 6, 13, 25, 32, + 6, 13, 25, 32 + ); + + assert_eq!(r, __msa_srlri_b(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlri_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 3276, 100, 127, + i16::max_value(), 3276, 100, 127 + ); + let r = i16x8(8192, 819, 25, 32, 8192, 819, 25, 32); + + assert_eq!(r, __msa_srlri_h(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlri_w() { + #[rustfmt::skip] + let a = i32x4(100, 150, 200, i32::max_value()); + let r = i32x4(25, 38, 50, 536870912); + + assert_eq!(r, __msa_srlri_w(a, 2)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_srlri_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::max_value()); + #[rustfmt::skip] + let r = i64x2(50, 4611686018427387904); + + assert_eq!(r, __msa_srlri_d(a, 1)); + } + + + #[simd_test(enable = "msa")] + unsafe fn test_msa_st_b() { + #[rustfmt::skip] + let a = i8x16( + 13, 14, 15, 16, + 17, 18, 19, 20, + 21, 22, 23, 24, + 25, 26, 27, 28 + ); + let mut arr : [i8; 16] = [ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0 + ]; + #[rustfmt::skip] + let r : [i8; 16] = [ + 13, 14, 15, 16, + 17, 18, 19, 20, + 21, 22, 23, 24, + 25, 26, 27, 28 + ]; + __msa_st_b(a, arr.as_mut_ptr(), 0); + assert_eq!(arr, r); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_st_h() { + #[rustfmt::skip] + let a = i16x8(13, 14, 15, 16, 17, 18, 19, 20); + let mut arr : [i16; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; + #[rustfmt::skip] + let r : [i16; 8] = [13, 14, 15, 16, 17, 18, 19, 20]; + __msa_st_h(a, arr.as_mut_ptr() as *mut i8, 0); + assert_eq!(arr, r); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_st_w() { + #[rustfmt::skip] + let a = i32x4(13, 14, 15, 16); + let mut arr : [i32; 4] = [0, 0, 0, 0]; + #[rustfmt::skip] + let r : [i32; 4] = [13, 14, 15, 16]; + __msa_st_w(a, arr.as_mut_ptr() as *mut i8, 0); + assert_eq!(arr, r); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_st_d() { + 
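+ // Mirrors st_b/h/w above: stores both i64 lanes of a into arr at byte offset 0.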
#[rustfmt::skip] + let a = i64x2(13, 14); + let mut arr : [i64; 2] = [0, 0]; + #[rustfmt::skip] + let r : [i64; 2] = [13, 14]; + __msa_st_d(a, arr.as_mut_ptr() as *mut i8, 0); + assert_eq!(arr, r); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_s_b() { + #[rustfmt::skip] + let a = i8x16( + i8::min_value(), -2, -3, -4, + i8::min_value(), -2, -3, -4, + i8::min_value(), -2, -3, -4, + i8::min_value(), -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + 6, -7, 8, -9, + 6, -7, 8, -9, + 6, -7, 8, -9, + 6, -7, 8, -9 + ); + let r = i8x16( + i8::min_value(), 5, -11, 5, + i8::min_value(), 5, -11, 5, + i8::min_value(), 5, -11, 5, + i8::min_value(), 5, -11, 5 + ); + + assert_eq!(r, __msa_subs_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_s_h() { + #[rustfmt::skip] + let a = i16x8( + i16::min_value(), -2, -3, -4, + i16::min_value(), -2, -3, -4 + ); + #[rustfmt::skip] + let b = i16x8(6, -7, 8, -9, 6, -7, 8, -9); + let r = i16x8( + i16::min_value(), 5, -11, 5, + i16::min_value(), 5, -11, 5 + ); + + assert_eq!(r, __msa_subs_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_s_w() { + #[rustfmt::skip] + let a = i32x4(i32::min_value(), -2, -3, -4); + #[rustfmt::skip] + let b = i32x4(6, -7, 8, -9); + let r = i32x4(i32::min_value(), 5, -11, 5); + + assert_eq!(r, __msa_subs_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_s_d() { + #[rustfmt::skip] + let a = i64x2(i64::min_value(), -2); + #[rustfmt::skip] + let b = i64x2(6, -7); + let r = i64x2(i64::min_value(), 5); + + assert_eq!(r, __msa_subs_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_u_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9, + 6, 7, 8, 9 + ); + let r = u8x16( + 249, 0, 0, 0, + 249, 0, 0, 0, + 249, 0, 0, 0, + 249, 0, 0, 0 + ); + + assert_eq!(r, __msa_subs_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_u_h() { + #[rustfmt::skip] + let a = u16x8( + u16::max_value(), 2, 3, 4, + u16::max_value(), 2, 3, 4 + ); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let r = u16x8(65529, 0, 0, 0, 65529, 0, 0, 0); + + assert_eq!(r, __msa_subs_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_u_w() { + #[rustfmt::skip] + let a = u32x4(u32::max_value(), 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 9); + let r = u32x4(4294967289, 0, 0, 0); + + assert_eq!(r, __msa_subs_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subs_u_d() { + #[rustfmt::skip] + let a = u64x2(u64::max_value(), 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = u64x2(18446744073709551609, 0); + + assert_eq!(r, __msa_subs_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsus_u_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9, + -6, -7, -8, -9 + ); + let r = u8x16( + 255, 9, 11, 13, + 255, 9, 11, 13, + 255, 9, 11, 13, + 255, 9, 11, 13 + ); + + assert_eq!(r, __msa_subsus_u_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsus_u_h() { + #[rustfmt::skip] + let a = u16x8( + u16::max_value(), 2, 3, 4, + u16::max_value(), 2, 3, 4 + ); + #[rustfmt::skip] + let 
b = i16x8(-6, -7, -8, -9, -6, -7, -8, -9); + let r = u16x8(65535, 9, 11, 13, 65535, 9, 11, 13); + + assert_eq!(r, __msa_subsus_u_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsus_u_w() { + #[rustfmt::skip] + let a = u32x4(u32::max_value(), 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(-6, -7, -8, -9); + let r = u32x4(4294967295, 9, 11, 13); + + assert_eq!(r, __msa_subsus_u_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsus_u_d() { + #[rustfmt::skip] + let a = u64x2(u64::max_value(), 2); + #[rustfmt::skip] + let b = i64x2(-6, -7); + let r = u64x2(18446744073709551615, 9); + + assert_eq!(r, __msa_subsus_u_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsuu_s_b() { + #[rustfmt::skip] + let a = u8x16( + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4, + u8::max_value(), 2, 3, 4 + ); + #[rustfmt::skip] + let b = u8x16( + 6, 7, 8, u8::max_value(), + 6, 7, 8, u8::max_value(), + 6, 7, 8, u8::max_value(), + 6, 7, 8, u8::max_value() + ); + let r = i8x16( + 127, -5, -5, -128, + 127, -5, -5, -128, + 127, -5, -5, -128, + 127, -5, -5, -128 + ); + + assert_eq!(r, __msa_subsuu_s_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsuu_s_h() { + #[rustfmt::skip] + let a = u16x8( + u16::max_value(), 2, 3, 4, + u16::max_value(), 2, 3, 4 + ); + #[rustfmt::skip] + let b = u16x8(6, 7, 8, 65535, 6, 7, 8, 65535); + let r = i16x8(32767, -5, -5, -32768, 32767, -5, -5, -32768); + + assert_eq!(r, __msa_subsuu_s_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsuu_s_w() { + #[rustfmt::skip] + let a = u32x4(u32::max_value(), 2, 3, 4); + #[rustfmt::skip] + let b = u32x4(6, 7, 8, 4294967295); + let r = i32x4(2147483647, -5, -5, -2147483648); + + assert_eq!(r, __msa_subsuu_s_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subsuu_s_d() { + #[rustfmt::skip] + let a = u64x2(u64::max_value(), 2); + #[rustfmt::skip] + let b = u64x2(6, 7); + let r = i64x2(i64::max_value(), -5); + + assert_eq!(r, __msa_subsuu_s_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subv_b() { + #[rustfmt::skip] + let a = i8x16( + i8::min_value(), -2, -3, -4, + i8::min_value(), -2, -3, -4, + i8::min_value(), -2, -3, -4, + i8::min_value(), -2, -3, -4 + ); + #[rustfmt::skip] + let b = i8x16( + 6, -7, 8, -9, + 6, -7, 8, -9, + 6, -7, 8, -9, + 6, -7, 8, -9 + ); + let r = i8x16( + 122, 5, -11, 5, + 122, 5, -11, 5, + 122, 5, -11, 5, + 122, 5, -11, 5 + ); + + assert_eq!(r, __msa_subv_b(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subv_h() { + #[rustfmt::skip] + let a = i16x8( + i16::min_value(), -2, -3, -4, + i16::min_value(), -2, -3, -4 + ); + #[rustfmt::skip] + let b = i16x8(6, -7, 8, -9, 6, -7, 8, -9); + let r = i16x8(32762, 5, -11, 5, 32762, 5, -11, 5); + + assert_eq!(r, __msa_subv_h(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subv_w() { + #[rustfmt::skip] + let a = i32x4(i32::min_value(), -2, -3, -4); + #[rustfmt::skip] + let b = i32x4(6, -7, 8, -9); + let r = i32x4(2147483642, 5, -11, 5); + + assert_eq!(r, __msa_subv_w(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subv_d() { + #[rustfmt::skip] + let a = i64x2(i64::max_value(), -2); + #[rustfmt::skip] + let b = i64x2(6, -7); + let r = i64x2(9223372036854775801, 5); + + assert_eq!(r, __msa_subv_d(a, b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subvi_b() { + #[rustfmt::skip] + let a = i8x16( + 100, i8::max_value(), 50, i8::min_value(), + 100, 
i8::max_value(), 50, i8::min_value(), + 100, i8::max_value(), 50, i8::min_value(), + 100, i8::max_value(), 50, i8::min_value() + ); + let r = i8x16( + 95, 122, 45, 123, + 95, 122, 45, 123, + 95, 122, 45, 123, + 95, 122, 45, 123 + ); + + assert_eq!(r, __msa_subvi_b(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subvi_h() { + #[rustfmt::skip] + let a = i16x8( + i16::max_value(), 3276, -100, i16::min_value(), + i16::max_value(), 3276, -100, i16::min_value() + ); + let r = i16x8( + 32762, 3271, -105, 32763, + 32762, 3271, -105, 32763 + ); + + assert_eq!(r, __msa_subvi_h(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subvi_w() { + #[rustfmt::skip] + let a = i32x4(100, 150, 200, i32::max_value()); + let r = i32x4(95, 145, 195, 2147483642); + + assert_eq!(r, __msa_subvi_w(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_subvi_d() { + #[rustfmt::skip] + let a = i64x2(100, i64::max_value()); + #[rustfmt::skip] + let r = i64x2(95, 9223372036854775802); + + assert_eq!(r, __msa_subvi_d(a, 5)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_vshf_b() { + #[rustfmt::skip] + let a = i8x16( + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + #[rustfmt::skip] + let c = i8x16( + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i8x16( + 3, 2, 1, 4, + 3, 2, 1, 4, + 3, 2, 1, 4, + 3, 2, 1, 4 + ); + + assert_eq!(r, __msa_vshf_b(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_vshf_h() { + #[rustfmt::skip] + let a = i16x8( + 1, 2, 3, 4, + 1, 2, 3, 4 + ); + #[rustfmt::skip] + let b = i16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + #[rustfmt::skip] + let c = i16x8( + 4, 3, 2, 1, + 4, 3, 2, 1 + ); + let r = i16x8(3, 2, 1, 4, 3, 2, 1, 4); + + assert_eq!(r, __msa_vshf_h(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_vshf_w() { + #[rustfmt::skip] + let a = i32x4(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4(4, 3, 2, 1); + #[rustfmt::skip] + let c = i32x4(4, 3, 2, 1); + let r = i32x4(3, 2, 1, 4); + + assert_eq!(r, __msa_vshf_w(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_vshf_d() { + #[rustfmt::skip] + let a = i64x2(1, 2); + #[rustfmt::skip] + let b = i64x2(4, 3); + #[rustfmt::skip] + let c = i64x2(4, 3); + let r = i64x2(3, 4); + + assert_eq!(r, __msa_vshf_d(a, b, c)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_xor_v() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + #[rustfmt::skip] + let b = u8x16( + 16, 15, 14, 13, + 12, 11, 10, 9, + 8, 7, 6, 5, + 4, 3, 2, 1 + ); + let r = u8x16( + 17, 13, 13, 9, + 9, 13, 13, 1, + 1, 13, 13, 9, + 9, 13, 13, 17 + ); + + assert_eq!(r, __msa_xor_v(a,b)); + } + + #[simd_test(enable = "msa")] + unsafe fn test_msa_xori_b() { + #[rustfmt::skip] + let a = u8x16( + 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16 + ); + let r = u8x16( + 5, 6, 7, 0, + 1, 2, 3, 12, + 13, 14, 15, 8, + 9, 10, 11, 20 ); - let r = i8x16(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5); - assert_eq!(r, msa::__msa_add_a_b(a, b)); + assert_eq!(r, __msa_xori_b(a, 4)); } } diff --git a/crates/core_arch/src/mips/msa/macros.rs b/crates/core_arch/src/mips/msa/macros.rs new file mode 100644 index 0000000000..c30583e854 --- /dev/null +++ b/crates/core_arch/src/mips/msa/macros.rs @@ -0,0 +1,4347 @@ +//! Utility macros. + +macro_rules! 
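+// A note on the encoding (best-effort reading): this macro constifies a signed
+// 13-bit immediate that must be a multiple of 8, i.e. an 8-byte-scaled offset of
+// the kind ld.d/st.d take. Masked values with bit 12 set re-expand as their
+// sign-extended negatives (4096 => -4096, ..., 8184 => -8), and the fall-through
+// arm covers the one remaining positive value, 4088.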
constify_imm_s13 { + ($imm_s13:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm_s13) & 0b1_1111_1111_1111 { + 0 => $expand!(0), + 8 => $expand!(8), + 16 => $expand!(16), + 24 => $expand!(24), + 32 => $expand!(32), + 40 => $expand!(40), + 48 => $expand!(48), + 56 => $expand!(56), + 64 => $expand!(64), + 72 => $expand!(72), + 80 => $expand!(80), + 88 => $expand!(88), + 96 => $expand!(96), + 104 => $expand!(104), + 112 => $expand!(112), + 120 => $expand!(120), + 128 => $expand!(128), + 136 => $expand!(136), + 144 => $expand!(144), + 152 => $expand!(152), + 160 => $expand!(160), + 168 => $expand!(168), + 176 => $expand!(176), + 184 => $expand!(184), + 192 => $expand!(192), + 200 => $expand!(200), + 208 => $expand!(208), + 216 => $expand!(216), + 224 => $expand!(224), + 232 => $expand!(232), + 240 => $expand!(240), + 248 => $expand!(248), + 256 => $expand!(256), + 264 => $expand!(264), + 272 => $expand!(272), + 280 => $expand!(280), + 288 => $expand!(288), + 296 => $expand!(296), + 304 => $expand!(304), + 312 => $expand!(312), + 320 => $expand!(320), + 328 => $expand!(328), + 336 => $expand!(336), + 344 => $expand!(344), + 352 => $expand!(352), + 360 => $expand!(360), + 368 => $expand!(368), + 376 => $expand!(376), + 384 => $expand!(384), + 392 => $expand!(392), + 400 => $expand!(400), + 408 => $expand!(408), + 416 => $expand!(416), + 424 => $expand!(424), + 432 => $expand!(432), + 440 => $expand!(440), + 448 => $expand!(448), + 456 => $expand!(456), + 464 => $expand!(464), + 472 => $expand!(472), + 480 => $expand!(480), + 488 => $expand!(488), + 496 => $expand!(496), + 504 => $expand!(504), + 512 => $expand!(512), + 520 => $expand!(520), + 528 => $expand!(528), + 536 => $expand!(536), + 544 => $expand!(544), + 552 => $expand!(552), + 560 => $expand!(560), + 568 => $expand!(568), + 576 => $expand!(576), + 584 => $expand!(584), + 592 => $expand!(592), + 600 => $expand!(600), + 608 => $expand!(608), + 616 => $expand!(616), + 624 => $expand!(624), + 632 => $expand!(632), + 640 => $expand!(640), + 648 => $expand!(648), + 656 => $expand!(656), + 664 => $expand!(664), + 672 => $expand!(672), + 680 => $expand!(680), + 688 => $expand!(688), + 696 => $expand!(696), + 704 => $expand!(704), + 712 => $expand!(712), + 720 => $expand!(720), + 728 => $expand!(728), + 736 => $expand!(736), + 744 => $expand!(744), + 752 => $expand!(752), + 760 => $expand!(760), + 768 => $expand!(768), + 776 => $expand!(776), + 784 => $expand!(784), + 792 => $expand!(792), + 800 => $expand!(800), + 808 => $expand!(808), + 816 => $expand!(816), + 824 => $expand!(824), + 832 => $expand!(832), + 840 => $expand!(840), + 848 => $expand!(848), + 856 => $expand!(856), + 864 => $expand!(864), + 872 => $expand!(872), + 880 => $expand!(880), + 888 => $expand!(888), + 896 => $expand!(896), + 904 => $expand!(904), + 912 => $expand!(912), + 920 => $expand!(920), + 928 => $expand!(928), + 936 => $expand!(936), + 944 => $expand!(944), + 952 => $expand!(952), + 960 => $expand!(960), + 968 => $expand!(968), + 976 => $expand!(976), + 984 => $expand!(984), + 992 => $expand!(992), + 1000 => $expand!(1000), + 1008 => $expand!(1008), + 1016 => $expand!(1016), + 1024 => $expand!(1024), + 1032 => $expand!(1032), + 1040 => $expand!(1040), + 1048 => $expand!(1048), + 1056 => $expand!(1056), + 1064 => $expand!(1064), + 1072 => $expand!(1072), + 1080 => $expand!(1080), + 1088 => $expand!(1088), + 1096 => $expand!(1096), + 1104 => $expand!(1104), + 1112 => $expand!(1112), + 1120 => $expand!(1120), + 1128 => $expand!(1128), + 1136 => 
$expand!(1136), + 1144 => $expand!(1144), + 1152 => $expand!(1152), + 1160 => $expand!(1160), + 1168 => $expand!(1168), + 1176 => $expand!(1176), + 1184 => $expand!(1184), + 1192 => $expand!(1192), + 1200 => $expand!(1200), + 1208 => $expand!(1208), + 1216 => $expand!(1216), + 1224 => $expand!(1224), + 1232 => $expand!(1232), + 1240 => $expand!(1240), + 1248 => $expand!(1248), + 1256 => $expand!(1256), + 1264 => $expand!(1264), + 1272 => $expand!(1272), + 1280 => $expand!(1280), + 1288 => $expand!(1288), + 1296 => $expand!(1296), + 1304 => $expand!(1304), + 1312 => $expand!(1312), + 1320 => $expand!(1320), + 1328 => $expand!(1328), + 1336 => $expand!(1336), + 1344 => $expand!(1344), + 1352 => $expand!(1352), + 1360 => $expand!(1360), + 1368 => $expand!(1368), + 1376 => $expand!(1376), + 1384 => $expand!(1384), + 1392 => $expand!(1392), + 1400 => $expand!(1400), + 1408 => $expand!(1408), + 1416 => $expand!(1416), + 1424 => $expand!(1424), + 1432 => $expand!(1432), + 1440 => $expand!(1440), + 1448 => $expand!(1448), + 1456 => $expand!(1456), + 1464 => $expand!(1464), + 1472 => $expand!(1472), + 1480 => $expand!(1480), + 1488 => $expand!(1488), + 1496 => $expand!(1496), + 1504 => $expand!(1504), + 1512 => $expand!(1512), + 1520 => $expand!(1520), + 1528 => $expand!(1528), + 1536 => $expand!(1536), + 1544 => $expand!(1544), + 1552 => $expand!(1552), + 1560 => $expand!(1560), + 1568 => $expand!(1568), + 1576 => $expand!(1576), + 1584 => $expand!(1584), + 1592 => $expand!(1592), + 1600 => $expand!(1600), + 1608 => $expand!(1608), + 1616 => $expand!(1616), + 1624 => $expand!(1624), + 1632 => $expand!(1632), + 1640 => $expand!(1640), + 1648 => $expand!(1648), + 1656 => $expand!(1656), + 1664 => $expand!(1664), + 1672 => $expand!(1672), + 1680 => $expand!(1680), + 1688 => $expand!(1688), + 1696 => $expand!(1696), + 1704 => $expand!(1704), + 1712 => $expand!(1712), + 1720 => $expand!(1720), + 1728 => $expand!(1728), + 1736 => $expand!(1736), + 1744 => $expand!(1744), + 1752 => $expand!(1752), + 1760 => $expand!(1760), + 1768 => $expand!(1768), + 1776 => $expand!(1776), + 1784 => $expand!(1784), + 1792 => $expand!(1792), + 1800 => $expand!(1800), + 1808 => $expand!(1808), + 1816 => $expand!(1816), + 1824 => $expand!(1824), + 1832 => $expand!(1832), + 1840 => $expand!(1840), + 1848 => $expand!(1848), + 1856 => $expand!(1856), + 1864 => $expand!(1864), + 1872 => $expand!(1872), + 1880 => $expand!(1880), + 1888 => $expand!(1888), + 1896 => $expand!(1896), + 1904 => $expand!(1904), + 1912 => $expand!(1912), + 1920 => $expand!(1920), + 1928 => $expand!(1928), + 1936 => $expand!(1936), + 1944 => $expand!(1944), + 1952 => $expand!(1952), + 1960 => $expand!(1960), + 1968 => $expand!(1968), + 1976 => $expand!(1976), + 1984 => $expand!(1984), + 1992 => $expand!(1992), + 2000 => $expand!(2000), + 2008 => $expand!(2008), + 2016 => $expand!(2016), + 2024 => $expand!(2024), + 2032 => $expand!(2032), + 2040 => $expand!(2040), + 2048 => $expand!(2048), + 2056 => $expand!(2056), + 2064 => $expand!(2064), + 2072 => $expand!(2072), + 2080 => $expand!(2080), + 2088 => $expand!(2088), + 2096 => $expand!(2096), + 2104 => $expand!(2104), + 2112 => $expand!(2112), + 2120 => $expand!(2120), + 2128 => $expand!(2128), + 2136 => $expand!(2136), + 2144 => $expand!(2144), + 2152 => $expand!(2152), + 2160 => $expand!(2160), + 2168 => $expand!(2168), + 2176 => $expand!(2176), + 2184 => $expand!(2184), + 2192 => $expand!(2192), + 2200 => $expand!(2200), + 2208 => $expand!(2208), + 2216 => $expand!(2216), + 2224 => $expand!(2224), + 
2232 => $expand!(2232), + 2240 => $expand!(2240), + 2248 => $expand!(2248), + 2256 => $expand!(2256), + 2264 => $expand!(2264), + 2272 => $expand!(2272), + 2280 => $expand!(2280), + 2288 => $expand!(2288), + 2296 => $expand!(2296), + 2304 => $expand!(2304), + 2312 => $expand!(2312), + 2320 => $expand!(2320), + 2328 => $expand!(2328), + 2336 => $expand!(2336), + 2344 => $expand!(2344), + 2352 => $expand!(2352), + 2360 => $expand!(2360), + 2368 => $expand!(2368), + 2376 => $expand!(2376), + 2384 => $expand!(2384), + 2392 => $expand!(2392), + 2400 => $expand!(2400), + 2408 => $expand!(2408), + 2416 => $expand!(2416), + 2424 => $expand!(2424), + 2432 => $expand!(2432), + 2440 => $expand!(2440), + 2448 => $expand!(2448), + 2456 => $expand!(2456), + 2464 => $expand!(2464), + 2472 => $expand!(2472), + 2480 => $expand!(2480), + 2488 => $expand!(2488), + 2496 => $expand!(2496), + 2504 => $expand!(2504), + 2512 => $expand!(2512), + 2520 => $expand!(2520), + 2528 => $expand!(2528), + 2536 => $expand!(2536), + 2544 => $expand!(2544), + 2552 => $expand!(2552), + 2560 => $expand!(2560), + 2568 => $expand!(2568), + 2576 => $expand!(2576), + 2584 => $expand!(2584), + 2592 => $expand!(2592), + 2600 => $expand!(2600), + 2608 => $expand!(2608), + 2616 => $expand!(2616), + 2624 => $expand!(2624), + 2632 => $expand!(2632), + 2640 => $expand!(2640), + 2648 => $expand!(2648), + 2656 => $expand!(2656), + 2664 => $expand!(2664), + 2672 => $expand!(2672), + 2680 => $expand!(2680), + 2688 => $expand!(2688), + 2696 => $expand!(2696), + 2704 => $expand!(2704), + 2712 => $expand!(2712), + 2720 => $expand!(2720), + 2728 => $expand!(2728), + 2736 => $expand!(2736), + 2744 => $expand!(2744), + 2752 => $expand!(2752), + 2760 => $expand!(2760), + 2768 => $expand!(2768), + 2776 => $expand!(2776), + 2784 => $expand!(2784), + 2792 => $expand!(2792), + 2800 => $expand!(2800), + 2808 => $expand!(2808), + 2816 => $expand!(2816), + 2824 => $expand!(2824), + 2832 => $expand!(2832), + 2840 => $expand!(2840), + 2848 => $expand!(2848), + 2856 => $expand!(2856), + 2864 => $expand!(2864), + 2872 => $expand!(2872), + 2880 => $expand!(2880), + 2888 => $expand!(2888), + 2896 => $expand!(2896), + 2904 => $expand!(2904), + 2912 => $expand!(2912), + 2920 => $expand!(2920), + 2928 => $expand!(2928), + 2936 => $expand!(2936), + 2944 => $expand!(2944), + 2952 => $expand!(2952), + 2960 => $expand!(2960), + 2968 => $expand!(2968), + 2976 => $expand!(2976), + 2984 => $expand!(2984), + 2992 => $expand!(2992), + 3000 => $expand!(3000), + 3008 => $expand!(3008), + 3016 => $expand!(3016), + 3024 => $expand!(3024), + 3032 => $expand!(3032), + 3040 => $expand!(3040), + 3048 => $expand!(3048), + 3056 => $expand!(3056), + 3064 => $expand!(3064), + 3072 => $expand!(3072), + 3080 => $expand!(3080), + 3088 => $expand!(3088), + 3096 => $expand!(3096), + 3104 => $expand!(3104), + 3112 => $expand!(3112), + 3120 => $expand!(3120), + 3128 => $expand!(3128), + 3136 => $expand!(3136), + 3144 => $expand!(3144), + 3152 => $expand!(3152), + 3160 => $expand!(3160), + 3168 => $expand!(3168), + 3176 => $expand!(3176), + 3184 => $expand!(3184), + 3192 => $expand!(3192), + 3200 => $expand!(3200), + 3208 => $expand!(3208), + 3216 => $expand!(3216), + 3224 => $expand!(3224), + 3232 => $expand!(3232), + 3240 => $expand!(3240), + 3248 => $expand!(3248), + 3256 => $expand!(3256), + 3264 => $expand!(3264), + 3272 => $expand!(3272), + 3280 => $expand!(3280), + 3288 => $expand!(3288), + 3296 => $expand!(3296), + 3304 => $expand!(3304), + 3312 => $expand!(3312), + 3320 => 
$expand!(3320), + 3328 => $expand!(3328), + 3336 => $expand!(3336), + 3344 => $expand!(3344), + 3352 => $expand!(3352), + 3360 => $expand!(3360), + 3368 => $expand!(3368), + 3376 => $expand!(3376), + 3384 => $expand!(3384), + 3392 => $expand!(3392), + 3400 => $expand!(3400), + 3408 => $expand!(3408), + 3416 => $expand!(3416), + 3424 => $expand!(3424), + 3432 => $expand!(3432), + 3440 => $expand!(3440), + 3448 => $expand!(3448), + 3456 => $expand!(3456), + 3464 => $expand!(3464), + 3472 => $expand!(3472), + 3480 => $expand!(3480), + 3488 => $expand!(3488), + 3496 => $expand!(3496), + 3504 => $expand!(3504), + 3512 => $expand!(3512), + 3520 => $expand!(3520), + 3528 => $expand!(3528), + 3536 => $expand!(3536), + 3544 => $expand!(3544), + 3552 => $expand!(3552), + 3560 => $expand!(3560), + 3568 => $expand!(3568), + 3576 => $expand!(3576), + 3584 => $expand!(3584), + 3592 => $expand!(3592), + 3600 => $expand!(3600), + 3608 => $expand!(3608), + 3616 => $expand!(3616), + 3624 => $expand!(3624), + 3632 => $expand!(3632), + 3640 => $expand!(3640), + 3648 => $expand!(3648), + 3656 => $expand!(3656), + 3664 => $expand!(3664), + 3672 => $expand!(3672), + 3680 => $expand!(3680), + 3688 => $expand!(3688), + 3696 => $expand!(3696), + 3704 => $expand!(3704), + 3712 => $expand!(3712), + 3720 => $expand!(3720), + 3728 => $expand!(3728), + 3736 => $expand!(3736), + 3744 => $expand!(3744), + 3752 => $expand!(3752), + 3760 => $expand!(3760), + 3768 => $expand!(3768), + 3776 => $expand!(3776), + 3784 => $expand!(3784), + 3792 => $expand!(3792), + 3800 => $expand!(3800), + 3808 => $expand!(3808), + 3816 => $expand!(3816), + 3824 => $expand!(3824), + 3832 => $expand!(3832), + 3840 => $expand!(3840), + 3848 => $expand!(3848), + 3856 => $expand!(3856), + 3864 => $expand!(3864), + 3872 => $expand!(3872), + 3880 => $expand!(3880), + 3888 => $expand!(3888), + 3896 => $expand!(3896), + 3904 => $expand!(3904), + 3912 => $expand!(3912), + 3920 => $expand!(3920), + 3928 => $expand!(3928), + 3936 => $expand!(3936), + 3944 => $expand!(3944), + 3952 => $expand!(3952), + 3960 => $expand!(3960), + 3968 => $expand!(3968), + 3976 => $expand!(3976), + 3984 => $expand!(3984), + 3992 => $expand!(3992), + 4000 => $expand!(4000), + 4008 => $expand!(4008), + 4016 => $expand!(4016), + 4024 => $expand!(4024), + 4032 => $expand!(4032), + 4040 => $expand!(4040), + 4048 => $expand!(4048), + 4056 => $expand!(4056), + 4064 => $expand!(4064), + 4072 => $expand!(4072), + 4080 => $expand!(4080), + 4096 => $expand!(-4096), + 4104 => $expand!(-4088), + 4112 => $expand!(-4080), + 4120 => $expand!(-4072), + 4128 => $expand!(-4064), + 4136 => $expand!(-4056), + 4144 => $expand!(-4048), + 4152 => $expand!(-4040), + 4160 => $expand!(-4032), + 4168 => $expand!(-4024), + 4176 => $expand!(-4016), + 4184 => $expand!(-4008), + 4192 => $expand!(-4000), + 4200 => $expand!(-3992), + 4208 => $expand!(-3984), + 4216 => $expand!(-3976), + 4224 => $expand!(-3968), + 4232 => $expand!(-3960), + 4240 => $expand!(-3952), + 4248 => $expand!(-3944), + 4256 => $expand!(-3936), + 4264 => $expand!(-3928), + 4272 => $expand!(-3920), + 4280 => $expand!(-3912), + 4288 => $expand!(-3904), + 4296 => $expand!(-3896), + 4304 => $expand!(-3888), + 4312 => $expand!(-3880), + 4320 => $expand!(-3872), + 4328 => $expand!(-3864), + 4336 => $expand!(-3856), + 4344 => $expand!(-3848), + 4352 => $expand!(-3840), + 4360 => $expand!(-3832), + 4368 => $expand!(-3824), + 4376 => $expand!(-3816), + 4384 => $expand!(-3808), + 4392 => $expand!(-3800), + 4400 => $expand!(-3792), + 4408 => 
$expand!(-3784), + 4416 => $expand!(-3776), + 4424 => $expand!(-3768), + 4432 => $expand!(-3760), + 4440 => $expand!(-3752), + 4448 => $expand!(-3744), + 4456 => $expand!(-3736), + 4464 => $expand!(-3728), + 4472 => $expand!(-3720), + 4480 => $expand!(-3712), + 4488 => $expand!(-3704), + 4496 => $expand!(-3696), + 4504 => $expand!(-3688), + 4512 => $expand!(-3680), + 4520 => $expand!(-3672), + 4528 => $expand!(-3664), + 4536 => $expand!(-3656), + 4544 => $expand!(-3648), + 4552 => $expand!(-3640), + 4560 => $expand!(-3632), + 4568 => $expand!(-3624), + 4576 => $expand!(-3616), + 4584 => $expand!(-3608), + 4592 => $expand!(-3600), + 4600 => $expand!(-3592), + 4608 => $expand!(-3584), + 4616 => $expand!(-3576), + 4624 => $expand!(-3568), + 4632 => $expand!(-3560), + 4640 => $expand!(-3552), + 4648 => $expand!(-3544), + 4656 => $expand!(-3536), + 4664 => $expand!(-3528), + 4672 => $expand!(-3520), + 4680 => $expand!(-3512), + 4688 => $expand!(-3504), + 4696 => $expand!(-3496), + 4704 => $expand!(-3488), + 4712 => $expand!(-3480), + 4720 => $expand!(-3472), + 4728 => $expand!(-3464), + 4736 => $expand!(-3456), + 4744 => $expand!(-3448), + 4752 => $expand!(-3440), + 4760 => $expand!(-3432), + 4768 => $expand!(-3424), + 4776 => $expand!(-3416), + 4784 => $expand!(-3408), + 4792 => $expand!(-3400), + 4800 => $expand!(-3392), + 4808 => $expand!(-3384), + 4816 => $expand!(-3376), + 4824 => $expand!(-3368), + 4832 => $expand!(-3360), + 4840 => $expand!(-3352), + 4848 => $expand!(-3344), + 4856 => $expand!(-3336), + 4864 => $expand!(-3328), + 4872 => $expand!(-3320), + 4880 => $expand!(-3312), + 4888 => $expand!(-3304), + 4896 => $expand!(-3296), + 4904 => $expand!(-3288), + 4912 => $expand!(-3280), + 4920 => $expand!(-3272), + 4928 => $expand!(-3264), + 4936 => $expand!(-3256), + 4944 => $expand!(-3248), + 4952 => $expand!(-3240), + 4960 => $expand!(-3232), + 4968 => $expand!(-3224), + 4976 => $expand!(-3216), + 4984 => $expand!(-3208), + 4992 => $expand!(-3200), + 5000 => $expand!(-3192), + 5008 => $expand!(-3184), + 5016 => $expand!(-3176), + 5024 => $expand!(-3168), + 5032 => $expand!(-3160), + 5040 => $expand!(-3152), + 5048 => $expand!(-3144), + 5056 => $expand!(-3136), + 5064 => $expand!(-3128), + 5072 => $expand!(-3120), + 5080 => $expand!(-3112), + 5088 => $expand!(-3104), + 5096 => $expand!(-3096), + 5104 => $expand!(-3088), + 5112 => $expand!(-3080), + 5120 => $expand!(-3072), + 5128 => $expand!(-3064), + 5136 => $expand!(-3056), + 5144 => $expand!(-3048), + 5152 => $expand!(-3040), + 5160 => $expand!(-3032), + 5168 => $expand!(-3024), + 5176 => $expand!(-3016), + 5184 => $expand!(-3008), + 5192 => $expand!(-3000), + 5200 => $expand!(-2992), + 5208 => $expand!(-2984), + 5216 => $expand!(-2976), + 5224 => $expand!(-2968), + 5232 => $expand!(-2960), + 5240 => $expand!(-2952), + 5248 => $expand!(-2944), + 5256 => $expand!(-2936), + 5264 => $expand!(-2928), + 5272 => $expand!(-2920), + 5280 => $expand!(-2912), + 5288 => $expand!(-2904), + 5296 => $expand!(-2896), + 5304 => $expand!(-2888), + 5312 => $expand!(-2880), + 5320 => $expand!(-2872), + 5328 => $expand!(-2864), + 5336 => $expand!(-2856), + 5344 => $expand!(-2848), + 5352 => $expand!(-2840), + 5360 => $expand!(-2832), + 5368 => $expand!(-2824), + 5376 => $expand!(-2816), + 5384 => $expand!(-2808), + 5392 => $expand!(-2800), + 5400 => $expand!(-2792), + 5408 => $expand!(-2784), + 5416 => $expand!(-2776), + 5424 => $expand!(-2768), + 5432 => $expand!(-2760), + 5440 => $expand!(-2752), + 5448 => $expand!(-2744), + 5456 => $expand!(-2736), 
+ 5464 => $expand!(-2728), + 5472 => $expand!(-2720), + 5480 => $expand!(-2712), + 5488 => $expand!(-2704), + 5496 => $expand!(-2696), + 5504 => $expand!(-2688), + 5512 => $expand!(-2680), + 5520 => $expand!(-2672), + 5528 => $expand!(-2664), + 5536 => $expand!(-2656), + 5544 => $expand!(-2648), + 5552 => $expand!(-2640), + 5560 => $expand!(-2632), + 5568 => $expand!(-2624), + 5576 => $expand!(-2616), + 5584 => $expand!(-2608), + 5592 => $expand!(-2600), + 5600 => $expand!(-2592), + 5608 => $expand!(-2584), + 5616 => $expand!(-2576), + 5624 => $expand!(-2568), + 5632 => $expand!(-2560), + 5640 => $expand!(-2552), + 5648 => $expand!(-2544), + 5656 => $expand!(-2536), + 5664 => $expand!(-2528), + 5672 => $expand!(-2520), + 5680 => $expand!(-2512), + 5688 => $expand!(-2504), + 5696 => $expand!(-2496), + 5704 => $expand!(-2488), + 5712 => $expand!(-2480), + 5720 => $expand!(-2472), + 5728 => $expand!(-2464), + 5736 => $expand!(-2456), + 5744 => $expand!(-2448), + 5752 => $expand!(-2440), + 5760 => $expand!(-2432), + 5768 => $expand!(-2424), + 5776 => $expand!(-2416), + 5784 => $expand!(-2408), + 5792 => $expand!(-2400), + 5800 => $expand!(-2392), + 5808 => $expand!(-2384), + 5816 => $expand!(-2376), + 5824 => $expand!(-2368), + 5832 => $expand!(-2360), + 5840 => $expand!(-2352), + 5848 => $expand!(-2344), + 5856 => $expand!(-2336), + 5864 => $expand!(-2328), + 5872 => $expand!(-2320), + 5880 => $expand!(-2312), + 5888 => $expand!(-2304), + 5896 => $expand!(-2296), + 5904 => $expand!(-2288), + 5912 => $expand!(-2280), + 5920 => $expand!(-2272), + 5928 => $expand!(-2264), + 5936 => $expand!(-2256), + 5944 => $expand!(-2248), + 5952 => $expand!(-2240), + 5960 => $expand!(-2232), + 5968 => $expand!(-2224), + 5976 => $expand!(-2216), + 5984 => $expand!(-2208), + 5992 => $expand!(-2200), + 6000 => $expand!(-2192), + 6008 => $expand!(-2184), + 6016 => $expand!(-2176), + 6024 => $expand!(-2168), + 6032 => $expand!(-2160), + 6040 => $expand!(-2152), + 6048 => $expand!(-2144), + 6056 => $expand!(-2136), + 6064 => $expand!(-2128), + 6072 => $expand!(-2120), + 6080 => $expand!(-2112), + 6088 => $expand!(-2104), + 6096 => $expand!(-2096), + 6104 => $expand!(-2088), + 6112 => $expand!(-2080), + 6120 => $expand!(-2072), + 6128 => $expand!(-2064), + 6136 => $expand!(-2056), + 6144 => $expand!(-2048), + 6152 => $expand!(-2040), + 6160 => $expand!(-2032), + 6168 => $expand!(-2024), + 6176 => $expand!(-2016), + 6184 => $expand!(-2008), + 6192 => $expand!(-2000), + 6200 => $expand!(-1992), + 6208 => $expand!(-1984), + 6216 => $expand!(-1976), + 6224 => $expand!(-1968), + 6232 => $expand!(-1960), + 6240 => $expand!(-1952), + 6248 => $expand!(-1944), + 6256 => $expand!(-1936), + 6264 => $expand!(-1928), + 6272 => $expand!(-1920), + 6280 => $expand!(-1912), + 6288 => $expand!(-1904), + 6296 => $expand!(-1896), + 6304 => $expand!(-1888), + 6312 => $expand!(-1880), + 6320 => $expand!(-1872), + 6328 => $expand!(-1864), + 6336 => $expand!(-1856), + 6344 => $expand!(-1848), + 6352 => $expand!(-1840), + 6360 => $expand!(-1832), + 6368 => $expand!(-1824), + 6376 => $expand!(-1816), + 6384 => $expand!(-1808), + 6392 => $expand!(-1800), + 6400 => $expand!(-1792), + 6408 => $expand!(-1784), + 6416 => $expand!(-1776), + 6424 => $expand!(-1768), + 6432 => $expand!(-1760), + 6440 => $expand!(-1752), + 6448 => $expand!(-1744), + 6456 => $expand!(-1736), + 6464 => $expand!(-1728), + 6472 => $expand!(-1720), + 6480 => $expand!(-1712), + 6488 => $expand!(-1704), + 6496 => $expand!(-1696), + 6504 => $expand!(-1688), + 6512 => 
$expand!(-1680), + 6520 => $expand!(-1672), + 6528 => $expand!(-1664), + 6536 => $expand!(-1656), + 6544 => $expand!(-1648), + 6552 => $expand!(-1640), + 6560 => $expand!(-1632), + 6568 => $expand!(-1624), + 6576 => $expand!(-1616), + 6584 => $expand!(-1608), + 6592 => $expand!(-1600), + 6600 => $expand!(-1592), + 6608 => $expand!(-1584), + 6616 => $expand!(-1576), + 6624 => $expand!(-1568), + 6632 => $expand!(-1560), + 6640 => $expand!(-1552), + 6648 => $expand!(-1544), + 6656 => $expand!(-1536), + 6664 => $expand!(-1528), + 6672 => $expand!(-1520), + 6680 => $expand!(-1512), + 6688 => $expand!(-1504), + 6696 => $expand!(-1496), + 6704 => $expand!(-1488), + 6712 => $expand!(-1480), + 6720 => $expand!(-1472), + 6728 => $expand!(-1464), + 6736 => $expand!(-1456), + 6744 => $expand!(-1448), + 6752 => $expand!(-1440), + 6760 => $expand!(-1432), + 6768 => $expand!(-1424), + 6776 => $expand!(-1416), + 6784 => $expand!(-1408), + 6792 => $expand!(-1400), + 6800 => $expand!(-1392), + 6808 => $expand!(-1384), + 6816 => $expand!(-1376), + 6824 => $expand!(-1368), + 6832 => $expand!(-1360), + 6840 => $expand!(-1352), + 6848 => $expand!(-1344), + 6856 => $expand!(-1336), + 6864 => $expand!(-1328), + 6872 => $expand!(-1320), + 6880 => $expand!(-1312), + 6888 => $expand!(-1304), + 6896 => $expand!(-1296), + 6904 => $expand!(-1288), + 6912 => $expand!(-1280), + 6920 => $expand!(-1272), + 6928 => $expand!(-1264), + 6936 => $expand!(-1256), + 6944 => $expand!(-1248), + 6952 => $expand!(-1240), + 6960 => $expand!(-1232), + 6968 => $expand!(-1224), + 6976 => $expand!(-1216), + 6984 => $expand!(-1208), + 6992 => $expand!(-1200), + 7000 => $expand!(-1192), + 7008 => $expand!(-1184), + 7016 => $expand!(-1176), + 7024 => $expand!(-1168), + 7032 => $expand!(-1160), + 7040 => $expand!(-1152), + 7048 => $expand!(-1144), + 7056 => $expand!(-1136), + 7064 => $expand!(-1128), + 7072 => $expand!(-1120), + 7080 => $expand!(-1112), + 7088 => $expand!(-1104), + 7096 => $expand!(-1096), + 7104 => $expand!(-1088), + 7112 => $expand!(-1080), + 7120 => $expand!(-1072), + 7128 => $expand!(-1064), + 7136 => $expand!(-1056), + 7144 => $expand!(-1048), + 7152 => $expand!(-1040), + 7160 => $expand!(-1032), + 7168 => $expand!(-1024), + 7176 => $expand!(-1016), + 7184 => $expand!(-1008), + 7192 => $expand!(-1000), + 7200 => $expand!(-992), + 7208 => $expand!(-984), + 7216 => $expand!(-976), + 7224 => $expand!(-968), + 7232 => $expand!(-960), + 7240 => $expand!(-952), + 7248 => $expand!(-944), + 7256 => $expand!(-936), + 7264 => $expand!(-928), + 7272 => $expand!(-920), + 7280 => $expand!(-912), + 7288 => $expand!(-904), + 7296 => $expand!(-896), + 7304 => $expand!(-888), + 7312 => $expand!(-880), + 7320 => $expand!(-872), + 7328 => $expand!(-864), + 7336 => $expand!(-856), + 7344 => $expand!(-848), + 7352 => $expand!(-840), + 7360 => $expand!(-832), + 7368 => $expand!(-824), + 7376 => $expand!(-816), + 7384 => $expand!(-808), + 7392 => $expand!(-800), + 7400 => $expand!(-792), + 7408 => $expand!(-784), + 7416 => $expand!(-776), + 7424 => $expand!(-768), + 7432 => $expand!(-760), + 7440 => $expand!(-752), + 7448 => $expand!(-744), + 7456 => $expand!(-736), + 7464 => $expand!(-728), + 7472 => $expand!(-720), + 7480 => $expand!(-712), + 7488 => $expand!(-704), + 7496 => $expand!(-696), + 7504 => $expand!(-688), + 7512 => $expand!(-680), + 7520 => $expand!(-672), + 7528 => $expand!(-664), + 7536 => $expand!(-656), + 7544 => $expand!(-648), + 7552 => $expand!(-640), + 7560 => $expand!(-632), + 7568 => $expand!(-624), + 7576 => 
$expand!(-616), + 7584 => $expand!(-608), + 7592 => $expand!(-600), + 7600 => $expand!(-592), + 7608 => $expand!(-584), + 7616 => $expand!(-576), + 7624 => $expand!(-568), + 7632 => $expand!(-560), + 7640 => $expand!(-552), + 7648 => $expand!(-544), + 7656 => $expand!(-536), + 7664 => $expand!(-528), + 7672 => $expand!(-520), + 7680 => $expand!(-512), + 7688 => $expand!(-504), + 7696 => $expand!(-496), + 7704 => $expand!(-488), + 7712 => $expand!(-480), + 7720 => $expand!(-472), + 7728 => $expand!(-464), + 7736 => $expand!(-456), + 7744 => $expand!(-448), + 7752 => $expand!(-440), + 7760 => $expand!(-432), + 7768 => $expand!(-424), + 7776 => $expand!(-416), + 7784 => $expand!(-408), + 7792 => $expand!(-400), + 7800 => $expand!(-392), + 7808 => $expand!(-384), + 7816 => $expand!(-376), + 7824 => $expand!(-368), + 7832 => $expand!(-360), + 7840 => $expand!(-352), + 7848 => $expand!(-344), + 7856 => $expand!(-336), + 7864 => $expand!(-328), + 7872 => $expand!(-320), + 7880 => $expand!(-312), + 7888 => $expand!(-304), + 7896 => $expand!(-296), + 7904 => $expand!(-288), + 7912 => $expand!(-280), + 7920 => $expand!(-272), + 7928 => $expand!(-264), + 7936 => $expand!(-256), + 7944 => $expand!(-248), + 7952 => $expand!(-240), + 7960 => $expand!(-232), + 7968 => $expand!(-224), + 7976 => $expand!(-216), + 7984 => $expand!(-208), + 7992 => $expand!(-200), + 8000 => $expand!(-192), + 8008 => $expand!(-184), + 8016 => $expand!(-176), + 8024 => $expand!(-168), + 8032 => $expand!(-160), + 8040 => $expand!(-152), + 8048 => $expand!(-144), + 8056 => $expand!(-136), + 8064 => $expand!(-128), + 8072 => $expand!(-120), + 8080 => $expand!(-112), + 8088 => $expand!(-104), + 8096 => $expand!(-96), + 8104 => $expand!(-88), + 8112 => $expand!(-80), + 8120 => $expand!(-72), + 8128 => $expand!(-64), + 8136 => $expand!(-56), + 8144 => $expand!(-48), + 8152 => $expand!(-40), + 8160 => $expand!(-32), + 8168 => $expand!(-24), + 8176 => $expand!(-16), + 8184 => $expand!(-8), + _ => $expand!(4088) + } + }; +} + +macro_rules! 
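+// Same scheme as constify_imm_s13, but for a signed 12-bit immediate that must
+// be a multiple of 4 (a 4-byte-scaled offset, as ld.w/st.w take): masked values
+// with bit 11 set re-expand as their sign-extended negatives (2048 => -2048).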
constify_imm_s12 { + ($imm_s12:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm_s12) & 0b1111_1111_1111 { + 0 => $expand!(0), + 4 => $expand!(4), + 8 => $expand!(8), + 12 => $expand!(12), + 16 => $expand!(16), + 20 => $expand!(20), + 24 => $expand!(24), + 28 => $expand!(28), + 32 => $expand!(32), + 36 => $expand!(36), + 40 => $expand!(40), + 44 => $expand!(44), + 48 => $expand!(48), + 52 => $expand!(52), + 56 => $expand!(56), + 60 => $expand!(60), + 64 => $expand!(64), + 68 => $expand!(68), + 72 => $expand!(72), + 76 => $expand!(76), + 80 => $expand!(80), + 84 => $expand!(84), + 88 => $expand!(88), + 92 => $expand!(92), + 96 => $expand!(96), + 100 => $expand!(100), + 104 => $expand!(104), + 108 => $expand!(108), + 112 => $expand!(112), + 116 => $expand!(116), + 120 => $expand!(120), + 124 => $expand!(124), + 128 => $expand!(128), + 132 => $expand!(132), + 136 => $expand!(136), + 140 => $expand!(140), + 144 => $expand!(144), + 148 => $expand!(148), + 152 => $expand!(152), + 156 => $expand!(156), + 160 => $expand!(160), + 164 => $expand!(164), + 168 => $expand!(168), + 172 => $expand!(172), + 176 => $expand!(176), + 180 => $expand!(180), + 184 => $expand!(184), + 188 => $expand!(188), + 192 => $expand!(192), + 196 => $expand!(196), + 200 => $expand!(200), + 204 => $expand!(204), + 208 => $expand!(208), + 212 => $expand!(212), + 216 => $expand!(216), + 220 => $expand!(220), + 224 => $expand!(224), + 228 => $expand!(228), + 232 => $expand!(232), + 236 => $expand!(236), + 240 => $expand!(240), + 244 => $expand!(244), + 248 => $expand!(248), + 252 => $expand!(252), + 256 => $expand!(256), + 260 => $expand!(260), + 264 => $expand!(264), + 268 => $expand!(268), + 272 => $expand!(272), + 276 => $expand!(276), + 280 => $expand!(280), + 284 => $expand!(284), + 288 => $expand!(288), + 292 => $expand!(292), + 296 => $expand!(296), + 300 => $expand!(300), + 304 => $expand!(304), + 308 => $expand!(308), + 312 => $expand!(312), + 316 => $expand!(316), + 320 => $expand!(320), + 324 => $expand!(324), + 328 => $expand!(328), + 332 => $expand!(332), + 336 => $expand!(336), + 340 => $expand!(340), + 344 => $expand!(344), + 348 => $expand!(348), + 352 => $expand!(352), + 356 => $expand!(356), + 360 => $expand!(360), + 364 => $expand!(364), + 368 => $expand!(368), + 372 => $expand!(372), + 376 => $expand!(376), + 380 => $expand!(380), + 384 => $expand!(384), + 388 => $expand!(388), + 392 => $expand!(392), + 396 => $expand!(396), + 400 => $expand!(400), + 404 => $expand!(404), + 408 => $expand!(408), + 412 => $expand!(412), + 416 => $expand!(416), + 420 => $expand!(420), + 424 => $expand!(424), + 428 => $expand!(428), + 432 => $expand!(432), + 436 => $expand!(436), + 440 => $expand!(440), + 444 => $expand!(444), + 448 => $expand!(448), + 452 => $expand!(452), + 456 => $expand!(456), + 460 => $expand!(460), + 464 => $expand!(464), + 468 => $expand!(468), + 472 => $expand!(472), + 476 => $expand!(476), + 480 => $expand!(480), + 484 => $expand!(484), + 488 => $expand!(488), + 492 => $expand!(492), + 496 => $expand!(496), + 500 => $expand!(500), + 504 => $expand!(504), + 508 => $expand!(508), + 512 => $expand!(512), + 516 => $expand!(516), + 520 => $expand!(520), + 524 => $expand!(524), + 528 => $expand!(528), + 532 => $expand!(532), + 536 => $expand!(536), + 540 => $expand!(540), + 544 => $expand!(544), + 548 => $expand!(548), + 552 => $expand!(552), + 556 => $expand!(556), + 560 => $expand!(560), + 564 => $expand!(564), + 568 => $expand!(568), + 572 => $expand!(572), + 576 => $expand!(576), 
+ 580 => $expand!(580), + 584 => $expand!(584), + 588 => $expand!(588), + 592 => $expand!(592), + 596 => $expand!(596), + 600 => $expand!(600), + 604 => $expand!(604), + 608 => $expand!(608), + 612 => $expand!(612), + 616 => $expand!(616), + 620 => $expand!(620), + 624 => $expand!(624), + 628 => $expand!(628), + 632 => $expand!(632), + 636 => $expand!(636), + 640 => $expand!(640), + 644 => $expand!(644), + 648 => $expand!(648), + 652 => $expand!(652), + 656 => $expand!(656), + 660 => $expand!(660), + 664 => $expand!(664), + 668 => $expand!(668), + 672 => $expand!(672), + 676 => $expand!(676), + 680 => $expand!(680), + 684 => $expand!(684), + 688 => $expand!(688), + 692 => $expand!(692), + 696 => $expand!(696), + 700 => $expand!(700), + 704 => $expand!(704), + 708 => $expand!(708), + 712 => $expand!(712), + 716 => $expand!(716), + 720 => $expand!(720), + 724 => $expand!(724), + 728 => $expand!(728), + 732 => $expand!(732), + 736 => $expand!(736), + 740 => $expand!(740), + 744 => $expand!(744), + 748 => $expand!(748), + 752 => $expand!(752), + 756 => $expand!(756), + 760 => $expand!(760), + 764 => $expand!(764), + 768 => $expand!(768), + 772 => $expand!(772), + 776 => $expand!(776), + 780 => $expand!(780), + 784 => $expand!(784), + 788 => $expand!(788), + 792 => $expand!(792), + 796 => $expand!(796), + 800 => $expand!(800), + 804 => $expand!(804), + 808 => $expand!(808), + 812 => $expand!(812), + 816 => $expand!(816), + 820 => $expand!(820), + 824 => $expand!(824), + 828 => $expand!(828), + 832 => $expand!(832), + 836 => $expand!(836), + 840 => $expand!(840), + 844 => $expand!(844), + 848 => $expand!(848), + 852 => $expand!(852), + 856 => $expand!(856), + 860 => $expand!(860), + 864 => $expand!(864), + 868 => $expand!(868), + 872 => $expand!(872), + 876 => $expand!(876), + 880 => $expand!(880), + 884 => $expand!(884), + 888 => $expand!(888), + 892 => $expand!(892), + 896 => $expand!(896), + 900 => $expand!(900), + 904 => $expand!(904), + 908 => $expand!(908), + 912 => $expand!(912), + 916 => $expand!(916), + 920 => $expand!(920), + 924 => $expand!(924), + 928 => $expand!(928), + 932 => $expand!(932), + 936 => $expand!(936), + 940 => $expand!(940), + 944 => $expand!(944), + 948 => $expand!(948), + 952 => $expand!(952), + 956 => $expand!(956), + 960 => $expand!(960), + 964 => $expand!(964), + 968 => $expand!(968), + 972 => $expand!(972), + 976 => $expand!(976), + 980 => $expand!(980), + 984 => $expand!(984), + 988 => $expand!(988), + 992 => $expand!(992), + 996 => $expand!(996), + 1000 => $expand!(1000), + 1004 => $expand!(1004), + 1008 => $expand!(1008), + 1012 => $expand!(1012), + 1016 => $expand!(1016), + 1020 => $expand!(1020), + 1024 => $expand!(1024), + 1028 => $expand!(1028), + 1032 => $expand!(1032), + 1036 => $expand!(1036), + 1040 => $expand!(1040), + 1044 => $expand!(1044), + 1048 => $expand!(1048), + 1052 => $expand!(1052), + 1056 => $expand!(1056), + 1060 => $expand!(1060), + 1064 => $expand!(1064), + 1068 => $expand!(1068), + 1072 => $expand!(1072), + 1076 => $expand!(1076), + 1080 => $expand!(1080), + 1084 => $expand!(1084), + 1088 => $expand!(1088), + 1092 => $expand!(1092), + 1096 => $expand!(1096), + 1100 => $expand!(1100), + 1104 => $expand!(1104), + 1108 => $expand!(1108), + 1112 => $expand!(1112), + 1116 => $expand!(1116), + 1120 => $expand!(1120), + 1124 => $expand!(1124), + 1128 => $expand!(1128), + 1132 => $expand!(1132), + 1136 => $expand!(1136), + 1140 => $expand!(1140), + 1144 => $expand!(1144), + 1148 => $expand!(1148), + 1152 => $expand!(1152), + 1156 => $expand!(1156), + 1160 => 
$expand!(1160), + 1164 => $expand!(1164), + 1168 => $expand!(1168), + 1172 => $expand!(1172), + 1176 => $expand!(1176), + 1180 => $expand!(1180), + 1184 => $expand!(1184), + 1188 => $expand!(1188), + 1192 => $expand!(1192), + 1196 => $expand!(1196), + 1200 => $expand!(1200), + 1204 => $expand!(1204), + 1208 => $expand!(1208), + 1212 => $expand!(1212), + 1216 => $expand!(1216), + 1220 => $expand!(1220), + 1224 => $expand!(1224), + 1228 => $expand!(1228), + 1232 => $expand!(1232), + 1236 => $expand!(1236), + 1240 => $expand!(1240), + 1244 => $expand!(1244), + 1248 => $expand!(1248), + 1252 => $expand!(1252), + 1256 => $expand!(1256), + 1260 => $expand!(1260), + 1264 => $expand!(1264), + 1268 => $expand!(1268), + 1272 => $expand!(1272), + 1276 => $expand!(1276), + 1280 => $expand!(1280), + 1284 => $expand!(1284), + 1288 => $expand!(1288), + 1292 => $expand!(1292), + 1296 => $expand!(1296), + 1300 => $expand!(1300), + 1304 => $expand!(1304), + 1308 => $expand!(1308), + 1312 => $expand!(1312), + 1316 => $expand!(1316), + 1320 => $expand!(1320), + 1324 => $expand!(1324), + 1328 => $expand!(1328), + 1332 => $expand!(1332), + 1336 => $expand!(1336), + 1340 => $expand!(1340), + 1344 => $expand!(1344), + 1348 => $expand!(1348), + 1352 => $expand!(1352), + 1356 => $expand!(1356), + 1360 => $expand!(1360), + 1364 => $expand!(1364), + 1368 => $expand!(1368), + 1372 => $expand!(1372), + 1376 => $expand!(1376), + 1380 => $expand!(1380), + 1384 => $expand!(1384), + 1388 => $expand!(1388), + 1392 => $expand!(1392), + 1396 => $expand!(1396), + 1400 => $expand!(1400), + 1404 => $expand!(1404), + 1408 => $expand!(1408), + 1412 => $expand!(1412), + 1416 => $expand!(1416), + 1420 => $expand!(1420), + 1424 => $expand!(1424), + 1428 => $expand!(1428), + 1432 => $expand!(1432), + 1436 => $expand!(1436), + 1440 => $expand!(1440), + 1444 => $expand!(1444), + 1448 => $expand!(1448), + 1452 => $expand!(1452), + 1456 => $expand!(1456), + 1460 => $expand!(1460), + 1464 => $expand!(1464), + 1468 => $expand!(1468), + 1472 => $expand!(1472), + 1476 => $expand!(1476), + 1480 => $expand!(1480), + 1484 => $expand!(1484), + 1488 => $expand!(1488), + 1492 => $expand!(1492), + 1496 => $expand!(1496), + 1500 => $expand!(1500), + 1504 => $expand!(1504), + 1508 => $expand!(1508), + 1512 => $expand!(1512), + 1516 => $expand!(1516), + 1520 => $expand!(1520), + 1524 => $expand!(1524), + 1528 => $expand!(1528), + 1532 => $expand!(1532), + 1536 => $expand!(1536), + 1540 => $expand!(1540), + 1544 => $expand!(1544), + 1548 => $expand!(1548), + 1552 => $expand!(1552), + 1556 => $expand!(1556), + 1560 => $expand!(1560), + 1564 => $expand!(1564), + 1568 => $expand!(1568), + 1572 => $expand!(1572), + 1576 => $expand!(1576), + 1580 => $expand!(1580), + 1584 => $expand!(1584), + 1588 => $expand!(1588), + 1592 => $expand!(1592), + 1596 => $expand!(1596), + 1600 => $expand!(1600), + 1604 => $expand!(1604), + 1608 => $expand!(1608), + 1612 => $expand!(1612), + 1616 => $expand!(1616), + 1620 => $expand!(1620), + 1624 => $expand!(1624), + 1628 => $expand!(1628), + 1632 => $expand!(1632), + 1636 => $expand!(1636), + 1640 => $expand!(1640), + 1644 => $expand!(1644), + 1648 => $expand!(1648), + 1652 => $expand!(1652), + 1656 => $expand!(1656), + 1660 => $expand!(1660), + 1664 => $expand!(1664), + 1668 => $expand!(1668), + 1672 => $expand!(1672), + 1676 => $expand!(1676), + 1680 => $expand!(1680), + 1684 => $expand!(1684), + 1688 => $expand!(1688), + 1692 => $expand!(1692), + 1696 => $expand!(1696), + 1700 => $expand!(1700), + 1704 => $expand!(1704), + 
1708 => $expand!(1708), + 1712 => $expand!(1712), + 1716 => $expand!(1716), + 1720 => $expand!(1720), + 1724 => $expand!(1724), + 1728 => $expand!(1728), + 1732 => $expand!(1732), + 1736 => $expand!(1736), + 1740 => $expand!(1740), + 1744 => $expand!(1744), + 1748 => $expand!(1748), + 1752 => $expand!(1752), + 1756 => $expand!(1756), + 1760 => $expand!(1760), + 1764 => $expand!(1764), + 1768 => $expand!(1768), + 1772 => $expand!(1772), + 1776 => $expand!(1776), + 1780 => $expand!(1780), + 1784 => $expand!(1784), + 1788 => $expand!(1788), + 1792 => $expand!(1792), + 1796 => $expand!(1796), + 1800 => $expand!(1800), + 1804 => $expand!(1804), + 1808 => $expand!(1808), + 1812 => $expand!(1812), + 1816 => $expand!(1816), + 1820 => $expand!(1820), + 1824 => $expand!(1824), + 1828 => $expand!(1828), + 1832 => $expand!(1832), + 1836 => $expand!(1836), + 1840 => $expand!(1840), + 1844 => $expand!(1844), + 1848 => $expand!(1848), + 1852 => $expand!(1852), + 1856 => $expand!(1856), + 1860 => $expand!(1860), + 1864 => $expand!(1864), + 1868 => $expand!(1868), + 1872 => $expand!(1872), + 1876 => $expand!(1876), + 1880 => $expand!(1880), + 1884 => $expand!(1884), + 1888 => $expand!(1888), + 1892 => $expand!(1892), + 1896 => $expand!(1896), + 1900 => $expand!(1900), + 1904 => $expand!(1904), + 1908 => $expand!(1908), + 1912 => $expand!(1912), + 1916 => $expand!(1916), + 1920 => $expand!(1920), + 1924 => $expand!(1924), + 1928 => $expand!(1928), + 1932 => $expand!(1932), + 1936 => $expand!(1936), + 1940 => $expand!(1940), + 1944 => $expand!(1944), + 1948 => $expand!(1948), + 1952 => $expand!(1952), + 1956 => $expand!(1956), + 1960 => $expand!(1960), + 1964 => $expand!(1964), + 1968 => $expand!(1968), + 1972 => $expand!(1972), + 1976 => $expand!(1976), + 1980 => $expand!(1980), + 1984 => $expand!(1984), + 1988 => $expand!(1988), + 1992 => $expand!(1992), + 1996 => $expand!(1996), + 2000 => $expand!(2000), + 2004 => $expand!(2004), + 2008 => $expand!(2008), + 2012 => $expand!(2012), + 2016 => $expand!(2016), + 2020 => $expand!(2020), + 2024 => $expand!(2024), + 2028 => $expand!(2028), + 2032 => $expand!(2032), + 2036 => $expand!(2036), + 2040 => $expand!(2040), + 2048 => $expand!(-2048), + 2052 => $expand!(-2044), + 2056 => $expand!(-2040), + 2060 => $expand!(-2036), + 2064 => $expand!(-2032), + 2068 => $expand!(-2028), + 2072 => $expand!(-2024), + 2076 => $expand!(-2020), + 2080 => $expand!(-2016), + 2084 => $expand!(-2012), + 2088 => $expand!(-2008), + 2092 => $expand!(-2004), + 2096 => $expand!(-2000), + 2100 => $expand!(-1996), + 2104 => $expand!(-1992), + 2108 => $expand!(-1988), + 2112 => $expand!(-1984), + 2116 => $expand!(-1980), + 2120 => $expand!(-1976), + 2124 => $expand!(-1972), + 2128 => $expand!(-1968), + 2132 => $expand!(-1964), + 2136 => $expand!(-1960), + 2140 => $expand!(-1956), + 2144 => $expand!(-1952), + 2148 => $expand!(-1948), + 2152 => $expand!(-1944), + 2156 => $expand!(-1940), + 2160 => $expand!(-1936), + 2164 => $expand!(-1932), + 2168 => $expand!(-1928), + 2172 => $expand!(-1924), + 2176 => $expand!(-1920), + 2180 => $expand!(-1916), + 2184 => $expand!(-1912), + 2188 => $expand!(-1908), + 2192 => $expand!(-1904), + 2196 => $expand!(-1900), + 2200 => $expand!(-1896), + 2204 => $expand!(-1892), + 2208 => $expand!(-1888), + 2212 => $expand!(-1884), + 2216 => $expand!(-1880), + 2220 => $expand!(-1876), + 2224 => $expand!(-1872), + 2228 => $expand!(-1868), + 2232 => $expand!(-1864), + 2236 => $expand!(-1860), + 2240 => $expand!(-1856), + 2244 => $expand!(-1852), + 2248 => 
$expand!(-1848), + 2252 => $expand!(-1844), + 2256 => $expand!(-1840), + 2260 => $expand!(-1836), + 2264 => $expand!(-1832), + 2268 => $expand!(-1828), + 2272 => $expand!(-1824), + 2276 => $expand!(-1820), + 2280 => $expand!(-1816), + 2284 => $expand!(-1812), + 2288 => $expand!(-1808), + 2292 => $expand!(-1804), + 2296 => $expand!(-1800), + 2300 => $expand!(-1796), + 2304 => $expand!(-1792), + 2308 => $expand!(-1788), + 2312 => $expand!(-1784), + 2316 => $expand!(-1780), + 2320 => $expand!(-1776), + 2324 => $expand!(-1772), + 2328 => $expand!(-1768), + 2332 => $expand!(-1764), + 2336 => $expand!(-1760), + 2340 => $expand!(-1756), + 2344 => $expand!(-1752), + 2348 => $expand!(-1748), + 2352 => $expand!(-1744), + 2356 => $expand!(-1740), + 2360 => $expand!(-1736), + 2364 => $expand!(-1732), + 2368 => $expand!(-1728), + 2372 => $expand!(-1724), + 2376 => $expand!(-1720), + 2380 => $expand!(-1716), + 2384 => $expand!(-1712), + 2388 => $expand!(-1708), + 2392 => $expand!(-1704), + 2396 => $expand!(-1700), + 2400 => $expand!(-1696), + 2404 => $expand!(-1692), + 2408 => $expand!(-1688), + 2412 => $expand!(-1684), + 2416 => $expand!(-1680), + 2420 => $expand!(-1676), + 2424 => $expand!(-1672), + 2428 => $expand!(-1668), + 2432 => $expand!(-1664), + 2436 => $expand!(-1660), + 2440 => $expand!(-1656), + 2444 => $expand!(-1652), + 2448 => $expand!(-1648), + 2452 => $expand!(-1644), + 2456 => $expand!(-1640), + 2460 => $expand!(-1636), + 2464 => $expand!(-1632), + 2468 => $expand!(-1628), + 2472 => $expand!(-1624), + 2476 => $expand!(-1620), + 2480 => $expand!(-1616), + 2484 => $expand!(-1612), + 2488 => $expand!(-1608), + 2492 => $expand!(-1604), + 2496 => $expand!(-1600), + 2500 => $expand!(-1596), + 2504 => $expand!(-1592), + 2508 => $expand!(-1588), + 2512 => $expand!(-1584), + 2516 => $expand!(-1580), + 2520 => $expand!(-1576), + 2524 => $expand!(-1572), + 2528 => $expand!(-1568), + 2532 => $expand!(-1564), + 2536 => $expand!(-1560), + 2540 => $expand!(-1556), + 2544 => $expand!(-1552), + 2548 => $expand!(-1548), + 2552 => $expand!(-1544), + 2556 => $expand!(-1540), + 2560 => $expand!(-1536), + 2564 => $expand!(-1532), + 2568 => $expand!(-1528), + 2572 => $expand!(-1524), + 2576 => $expand!(-1520), + 2580 => $expand!(-1516), + 2584 => $expand!(-1512), + 2588 => $expand!(-1508), + 2592 => $expand!(-1504), + 2596 => $expand!(-1500), + 2600 => $expand!(-1496), + 2604 => $expand!(-1492), + 2608 => $expand!(-1488), + 2612 => $expand!(-1484), + 2616 => $expand!(-1480), + 2620 => $expand!(-1476), + 2624 => $expand!(-1472), + 2628 => $expand!(-1468), + 2632 => $expand!(-1464), + 2636 => $expand!(-1460), + 2640 => $expand!(-1456), + 2644 => $expand!(-1452), + 2648 => $expand!(-1448), + 2652 => $expand!(-1444), + 2656 => $expand!(-1440), + 2660 => $expand!(-1436), + 2664 => $expand!(-1432), + 2668 => $expand!(-1428), + 2672 => $expand!(-1424), + 2676 => $expand!(-1420), + 2680 => $expand!(-1416), + 2684 => $expand!(-1412), + 2688 => $expand!(-1408), + 2692 => $expand!(-1404), + 2696 => $expand!(-1400), + 2700 => $expand!(-1396), + 2704 => $expand!(-1392), + 2708 => $expand!(-1388), + 2712 => $expand!(-1384), + 2716 => $expand!(-1380), + 2720 => $expand!(-1376), + 2724 => $expand!(-1372), + 2728 => $expand!(-1368), + 2732 => $expand!(-1364), + 2736 => $expand!(-1360), + 2740 => $expand!(-1356), + 2744 => $expand!(-1352), + 2748 => $expand!(-1348), + 2752 => $expand!(-1344), + 2756 => $expand!(-1340), + 2760 => $expand!(-1336), + 2764 => $expand!(-1332), + 2768 => $expand!(-1328), + 2772 => $expand!(-1324), 
+ 2776 => $expand!(-1320), + 2780 => $expand!(-1316), + 2784 => $expand!(-1312), + 2788 => $expand!(-1308), + 2792 => $expand!(-1304), + 2796 => $expand!(-1300), + 2800 => $expand!(-1296), + 2804 => $expand!(-1292), + 2808 => $expand!(-1288), + 2812 => $expand!(-1284), + 2816 => $expand!(-1280), + 2820 => $expand!(-1276), + 2824 => $expand!(-1272), + 2828 => $expand!(-1268), + 2832 => $expand!(-1264), + 2836 => $expand!(-1260), + 2840 => $expand!(-1256), + 2844 => $expand!(-1252), + 2848 => $expand!(-1248), + 2852 => $expand!(-1244), + 2856 => $expand!(-1240), + 2860 => $expand!(-1236), + 2864 => $expand!(-1232), + 2868 => $expand!(-1228), + 2872 => $expand!(-1224), + 2876 => $expand!(-1220), + 2880 => $expand!(-1216), + 2884 => $expand!(-1212), + 2888 => $expand!(-1208), + 2892 => $expand!(-1204), + 2896 => $expand!(-1200), + 2900 => $expand!(-1196), + 2904 => $expand!(-1192), + 2908 => $expand!(-1188), + 2912 => $expand!(-1184), + 2916 => $expand!(-1180), + 2920 => $expand!(-1176), + 2924 => $expand!(-1172), + 2928 => $expand!(-1168), + 2932 => $expand!(-1164), + 2936 => $expand!(-1160), + 2940 => $expand!(-1156), + 2944 => $expand!(-1152), + 2948 => $expand!(-1148), + 2952 => $expand!(-1144), + 2956 => $expand!(-1140), + 2960 => $expand!(-1136), + 2964 => $expand!(-1132), + 2968 => $expand!(-1128), + 2972 => $expand!(-1124), + 2976 => $expand!(-1120), + 2980 => $expand!(-1116), + 2984 => $expand!(-1112), + 2988 => $expand!(-1108), + 2992 => $expand!(-1104), + 2996 => $expand!(-1100), + 3000 => $expand!(-1096), + 3004 => $expand!(-1092), + 3008 => $expand!(-1088), + 3012 => $expand!(-1084), + 3016 => $expand!(-1080), + 3020 => $expand!(-1076), + 3024 => $expand!(-1072), + 3028 => $expand!(-1068), + 3032 => $expand!(-1064), + 3036 => $expand!(-1060), + 3040 => $expand!(-1056), + 3044 => $expand!(-1052), + 3048 => $expand!(-1048), + 3052 => $expand!(-1044), + 3056 => $expand!(-1040), + 3060 => $expand!(-1036), + 3064 => $expand!(-1032), + 3068 => $expand!(-1028), + 3072 => $expand!(-1024), + 3076 => $expand!(-1020), + 3080 => $expand!(-1016), + 3084 => $expand!(-1012), + 3088 => $expand!(-1008), + 3092 => $expand!(-1004), + 3096 => $expand!(-1000), + 3100 => $expand!(-996), + 3104 => $expand!(-992), + 3108 => $expand!(-988), + 3112 => $expand!(-984), + 3116 => $expand!(-980), + 3120 => $expand!(-976), + 3124 => $expand!(-972), + 3128 => $expand!(-968), + 3132 => $expand!(-964), + 3136 => $expand!(-960), + 3140 => $expand!(-956), + 3144 => $expand!(-952), + 3148 => $expand!(-948), + 3152 => $expand!(-944), + 3156 => $expand!(-940), + 3160 => $expand!(-936), + 3164 => $expand!(-932), + 3168 => $expand!(-928), + 3172 => $expand!(-924), + 3176 => $expand!(-920), + 3180 => $expand!(-916), + 3184 => $expand!(-912), + 3188 => $expand!(-908), + 3192 => $expand!(-904), + 3196 => $expand!(-900), + 3200 => $expand!(-896), + 3204 => $expand!(-892), + 3208 => $expand!(-888), + 3212 => $expand!(-884), + 3216 => $expand!(-880), + 3220 => $expand!(-876), + 3224 => $expand!(-872), + 3228 => $expand!(-868), + 3232 => $expand!(-864), + 3236 => $expand!(-860), + 3240 => $expand!(-856), + 3244 => $expand!(-852), + 3248 => $expand!(-848), + 3252 => $expand!(-844), + 3256 => $expand!(-840), + 3260 => $expand!(-836), + 3264 => $expand!(-832), + 3268 => $expand!(-828), + 3272 => $expand!(-824), + 3276 => $expand!(-820), + 3280 => $expand!(-816), + 3284 => $expand!(-812), + 3288 => $expand!(-808), + 3292 => $expand!(-804), + 3296 => $expand!(-800), + 3300 => $expand!(-796), + 3304 => $expand!(-792), + 3308 => 
$expand!(-788), + 3312 => $expand!(-784), + 3316 => $expand!(-780), + 3320 => $expand!(-776), + 3324 => $expand!(-772), + 3328 => $expand!(-768), + 3332 => $expand!(-764), + 3336 => $expand!(-760), + 3340 => $expand!(-756), + 3344 => $expand!(-752), + 3348 => $expand!(-748), + 3352 => $expand!(-744), + 3356 => $expand!(-740), + 3360 => $expand!(-736), + 3364 => $expand!(-732), + 3368 => $expand!(-728), + 3372 => $expand!(-724), + 3376 => $expand!(-720), + 3380 => $expand!(-716), + 3384 => $expand!(-712), + 3388 => $expand!(-708), + 3392 => $expand!(-704), + 3396 => $expand!(-700), + 3400 => $expand!(-696), + 3404 => $expand!(-692), + 3408 => $expand!(-688), + 3412 => $expand!(-684), + 3416 => $expand!(-680), + 3420 => $expand!(-676), + 3424 => $expand!(-672), + 3428 => $expand!(-668), + 3432 => $expand!(-664), + 3436 => $expand!(-660), + 3440 => $expand!(-656), + 3444 => $expand!(-652), + 3448 => $expand!(-648), + 3452 => $expand!(-644), + 3456 => $expand!(-640), + 3460 => $expand!(-636), + 3464 => $expand!(-632), + 3468 => $expand!(-628), + 3472 => $expand!(-624), + 3476 => $expand!(-620), + 3480 => $expand!(-616), + 3484 => $expand!(-612), + 3488 => $expand!(-608), + 3492 => $expand!(-604), + 3496 => $expand!(-600), + 3500 => $expand!(-596), + 3504 => $expand!(-592), + 3508 => $expand!(-588), + 3512 => $expand!(-584), + 3516 => $expand!(-580), + 3520 => $expand!(-576), + 3524 => $expand!(-572), + 3528 => $expand!(-568), + 3532 => $expand!(-564), + 3536 => $expand!(-560), + 3540 => $expand!(-556), + 3544 => $expand!(-552), + 3548 => $expand!(-548), + 3552 => $expand!(-544), + 3556 => $expand!(-540), + 3560 => $expand!(-536), + 3564 => $expand!(-532), + 3568 => $expand!(-528), + 3572 => $expand!(-524), + 3576 => $expand!(-520), + 3580 => $expand!(-516), + 3584 => $expand!(-512), + 3588 => $expand!(-508), + 3592 => $expand!(-504), + 3596 => $expand!(-500), + 3600 => $expand!(-496), + 3604 => $expand!(-492), + 3608 => $expand!(-488), + 3612 => $expand!(-484), + 3616 => $expand!(-480), + 3620 => $expand!(-476), + 3624 => $expand!(-472), + 3628 => $expand!(-468), + 3632 => $expand!(-464), + 3636 => $expand!(-460), + 3640 => $expand!(-456), + 3644 => $expand!(-452), + 3648 => $expand!(-448), + 3652 => $expand!(-444), + 3656 => $expand!(-440), + 3660 => $expand!(-436), + 3664 => $expand!(-432), + 3668 => $expand!(-428), + 3672 => $expand!(-424), + 3676 => $expand!(-420), + 3680 => $expand!(-416), + 3684 => $expand!(-412), + 3688 => $expand!(-408), + 3692 => $expand!(-404), + 3696 => $expand!(-400), + 3700 => $expand!(-396), + 3704 => $expand!(-392), + 3708 => $expand!(-388), + 3712 => $expand!(-384), + 3716 => $expand!(-380), + 3720 => $expand!(-376), + 3724 => $expand!(-372), + 3728 => $expand!(-368), + 3732 => $expand!(-364), + 3736 => $expand!(-360), + 3740 => $expand!(-356), + 3744 => $expand!(-352), + 3748 => $expand!(-348), + 3752 => $expand!(-344), + 3756 => $expand!(-340), + 3760 => $expand!(-336), + 3764 => $expand!(-332), + 3768 => $expand!(-328), + 3772 => $expand!(-324), + 3776 => $expand!(-320), + 3780 => $expand!(-316), + 3784 => $expand!(-312), + 3788 => $expand!(-308), + 3792 => $expand!(-304), + 3796 => $expand!(-300), + 3800 => $expand!(-296), + 3804 => $expand!(-292), + 3808 => $expand!(-288), + 3812 => $expand!(-284), + 3816 => $expand!(-280), + 3820 => $expand!(-276), + 3824 => $expand!(-272), + 3828 => $expand!(-268), + 3832 => $expand!(-264), + 3836 => $expand!(-260), + 3840 => $expand!(-256), + 3844 => $expand!(-252), + 3848 => $expand!(-248), + 3852 => $expand!(-244), + 
3856 => $expand!(-240), + 3860 => $expand!(-236), + 3864 => $expand!(-232), + 3868 => $expand!(-228), + 3872 => $expand!(-224), + 3876 => $expand!(-220), + 3880 => $expand!(-216), + 3884 => $expand!(-212), + 3888 => $expand!(-208), + 3892 => $expand!(-204), + 3896 => $expand!(-200), + 3900 => $expand!(-196), + 3904 => $expand!(-192), + 3908 => $expand!(-188), + 3912 => $expand!(-184), + 3916 => $expand!(-180), + 3920 => $expand!(-176), + 3924 => $expand!(-172), + 3928 => $expand!(-168), + 3932 => $expand!(-164), + 3936 => $expand!(-160), + 3940 => $expand!(-156), + 3944 => $expand!(-152), + 3948 => $expand!(-148), + 3952 => $expand!(-144), + 3956 => $expand!(-140), + 3960 => $expand!(-136), + 3964 => $expand!(-132), + 3968 => $expand!(-128), + 3972 => $expand!(-124), + 3976 => $expand!(-120), + 3980 => $expand!(-116), + 3984 => $expand!(-112), + 3988 => $expand!(-108), + 3992 => $expand!(-104), + 3996 => $expand!(-100), + 4000 => $expand!(-96), + 4004 => $expand!(-92), + 4008 => $expand!(-88), + 4012 => $expand!(-84), + 4016 => $expand!(-80), + 4020 => $expand!(-76), + 4024 => $expand!(-72), + 4028 => $expand!(-68), + 4032 => $expand!(-64), + 4036 => $expand!(-60), + 4040 => $expand!(-56), + 4044 => $expand!(-52), + 4048 => $expand!(-48), + 4052 => $expand!(-44), + 4056 => $expand!(-40), + 4060 => $expand!(-36), + 4064 => $expand!(-32), + 4068 => $expand!(-28), + 4072 => $expand!(-24), + 4076 => $expand!(-20), + 4080 => $expand!(-16), + 4084 => $expand!(-12), + 4088 => $expand!(-8), + 4092 => $expand!(-4), + _ => $expand!(2044) + } + }; +} + +macro_rules! constify_imm_s11 { + ($imm_s11:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm_s11) & 0b111_1111_1111 { + 0 => $expand!(0), + 2 => $expand!(2), + 4 => $expand!(4), + 6 => $expand!(6), + 8 => $expand!(8), + 10 => $expand!(10), + 12 => $expand!(12), + 14 => $expand!(14), + 16 => $expand!(16), + 18 => $expand!(18), + 20 => $expand!(20), + 22 => $expand!(22), + 24 => $expand!(24), + 26 => $expand!(26), + 28 => $expand!(28), + 30 => $expand!(30), + 32 => $expand!(32), + 34 => $expand!(34), + 36 => $expand!(36), + 38 => $expand!(38), + 40 => $expand!(40), + 42 => $expand!(42), + 44 => $expand!(44), + 46 => $expand!(46), + 48 => $expand!(48), + 50 => $expand!(50), + 52 => $expand!(52), + 54 => $expand!(54), + 56 => $expand!(56), + 58 => $expand!(58), + 60 => $expand!(60), + 62 => $expand!(62), + 64 => $expand!(64), + 66 => $expand!(66), + 68 => $expand!(68), + 70 => $expand!(70), + 72 => $expand!(72), + 74 => $expand!(74), + 76 => $expand!(76), + 78 => $expand!(78), + 80 => $expand!(80), + 82 => $expand!(82), + 84 => $expand!(84), + 86 => $expand!(86), + 88 => $expand!(88), + 90 => $expand!(90), + 92 => $expand!(92), + 94 => $expand!(94), + 96 => $expand!(96), + 98 => $expand!(98), + 100 => $expand!(100), + 102 => $expand!(102), + 104 => $expand!(104), + 106 => $expand!(106), + 108 => $expand!(108), + 110 => $expand!(110), + 112 => $expand!(112), + 114 => $expand!(114), + 116 => $expand!(116), + 118 => $expand!(118), + 120 => $expand!(120), + 122 => $expand!(122), + 124 => $expand!(124), + 126 => $expand!(126), + 128 => $expand!(128), + 130 => $expand!(130), + 132 => $expand!(132), + 134 => $expand!(134), + 136 => $expand!(136), + 138 => $expand!(138), + 140 => $expand!(140), + 142 => $expand!(142), + 144 => $expand!(144), + 146 => $expand!(146), + 148 => $expand!(148), + 150 => $expand!(150), + 152 => $expand!(152), + 154 => $expand!(154), + 156 => $expand!(156), + 158 => $expand!(158), + 160 => $expand!(160), + 162 => 
$expand!(162), + 164 => $expand!(164), + 166 => $expand!(166), + 168 => $expand!(168), + 170 => $expand!(170), + 172 => $expand!(172), + 174 => $expand!(174), + 176 => $expand!(176), + 178 => $expand!(178), + 180 => $expand!(180), + 182 => $expand!(182), + 184 => $expand!(184), + 186 => $expand!(186), + 188 => $expand!(188), + 190 => $expand!(190), + 192 => $expand!(192), + 194 => $expand!(194), + 196 => $expand!(196), + 198 => $expand!(198), + 200 => $expand!(200), + 202 => $expand!(202), + 204 => $expand!(204), + 206 => $expand!(206), + 208 => $expand!(208), + 210 => $expand!(210), + 212 => $expand!(212), + 214 => $expand!(214), + 216 => $expand!(216), + 218 => $expand!(218), + 220 => $expand!(220), + 222 => $expand!(222), + 224 => $expand!(224), + 226 => $expand!(226), + 228 => $expand!(228), + 230 => $expand!(230), + 232 => $expand!(232), + 234 => $expand!(234), + 236 => $expand!(236), + 238 => $expand!(238), + 240 => $expand!(240), + 242 => $expand!(242), + 244 => $expand!(244), + 246 => $expand!(246), + 248 => $expand!(248), + 250 => $expand!(250), + 252 => $expand!(252), + 254 => $expand!(254), + 256 => $expand!(256), + 258 => $expand!(258), + 260 => $expand!(260), + 262 => $expand!(262), + 264 => $expand!(264), + 266 => $expand!(266), + 268 => $expand!(268), + 270 => $expand!(270), + 272 => $expand!(272), + 274 => $expand!(274), + 276 => $expand!(276), + 278 => $expand!(278), + 280 => $expand!(280), + 282 => $expand!(282), + 284 => $expand!(284), + 286 => $expand!(286), + 288 => $expand!(288), + 290 => $expand!(290), + 292 => $expand!(292), + 294 => $expand!(294), + 296 => $expand!(296), + 298 => $expand!(298), + 300 => $expand!(300), + 302 => $expand!(302), + 304 => $expand!(304), + 306 => $expand!(306), + 308 => $expand!(308), + 310 => $expand!(310), + 312 => $expand!(312), + 314 => $expand!(314), + 316 => $expand!(316), + 318 => $expand!(318), + 320 => $expand!(320), + 322 => $expand!(322), + 324 => $expand!(324), + 326 => $expand!(326), + 328 => $expand!(328), + 330 => $expand!(330), + 332 => $expand!(332), + 334 => $expand!(334), + 336 => $expand!(336), + 338 => $expand!(338), + 340 => $expand!(340), + 342 => $expand!(342), + 344 => $expand!(344), + 346 => $expand!(346), + 348 => $expand!(348), + 350 => $expand!(350), + 352 => $expand!(352), + 354 => $expand!(354), + 356 => $expand!(356), + 358 => $expand!(358), + 360 => $expand!(360), + 362 => $expand!(362), + 364 => $expand!(364), + 366 => $expand!(366), + 368 => $expand!(368), + 370 => $expand!(370), + 372 => $expand!(372), + 374 => $expand!(374), + 376 => $expand!(376), + 378 => $expand!(378), + 380 => $expand!(380), + 382 => $expand!(382), + 384 => $expand!(384), + 386 => $expand!(386), + 388 => $expand!(388), + 390 => $expand!(390), + 392 => $expand!(392), + 394 => $expand!(394), + 396 => $expand!(396), + 398 => $expand!(398), + 400 => $expand!(400), + 402 => $expand!(402), + 404 => $expand!(404), + 406 => $expand!(406), + 408 => $expand!(408), + 410 => $expand!(410), + 412 => $expand!(412), + 414 => $expand!(414), + 416 => $expand!(416), + 418 => $expand!(418), + 420 => $expand!(420), + 422 => $expand!(422), + 424 => $expand!(424), + 426 => $expand!(426), + 428 => $expand!(428), + 430 => $expand!(430), + 432 => $expand!(432), + 434 => $expand!(434), + 436 => $expand!(436), + 438 => $expand!(438), + 440 => $expand!(440), + 442 => $expand!(442), + 444 => $expand!(444), + 446 => $expand!(446), + 448 => $expand!(448), + 450 => $expand!(450), + 452 => $expand!(452), + 454 => $expand!(454), + 456 => $expand!(456), + 458 => 
$expand!(458), + 460 => $expand!(460), + 462 => $expand!(462), + 464 => $expand!(464), + 466 => $expand!(466), + 468 => $expand!(468), + 470 => $expand!(470), + 472 => $expand!(472), + 474 => $expand!(474), + 476 => $expand!(476), + 478 => $expand!(478), + 480 => $expand!(480), + 482 => $expand!(482), + 484 => $expand!(484), + 486 => $expand!(486), + 488 => $expand!(488), + 490 => $expand!(490), + 492 => $expand!(492), + 494 => $expand!(494), + 496 => $expand!(496), + 498 => $expand!(498), + 500 => $expand!(500), + 502 => $expand!(502), + 504 => $expand!(504), + 506 => $expand!(506), + 508 => $expand!(508), + 510 => $expand!(510), + 512 => $expand!(512), + 514 => $expand!(514), + 516 => $expand!(516), + 518 => $expand!(518), + 520 => $expand!(520), + 522 => $expand!(522), + 524 => $expand!(524), + 526 => $expand!(526), + 528 => $expand!(528), + 530 => $expand!(530), + 532 => $expand!(532), + 534 => $expand!(534), + 536 => $expand!(536), + 538 => $expand!(538), + 540 => $expand!(540), + 542 => $expand!(542), + 544 => $expand!(544), + 546 => $expand!(546), + 548 => $expand!(548), + 550 => $expand!(550), + 552 => $expand!(552), + 554 => $expand!(554), + 556 => $expand!(556), + 558 => $expand!(558), + 560 => $expand!(560), + 562 => $expand!(562), + 564 => $expand!(564), + 566 => $expand!(566), + 568 => $expand!(568), + 570 => $expand!(570), + 572 => $expand!(572), + 574 => $expand!(574), + 576 => $expand!(576), + 578 => $expand!(578), + 580 => $expand!(580), + 582 => $expand!(582), + 584 => $expand!(584), + 586 => $expand!(586), + 588 => $expand!(588), + 590 => $expand!(590), + 592 => $expand!(592), + 594 => $expand!(594), + 596 => $expand!(596), + 598 => $expand!(598), + 600 => $expand!(600), + 602 => $expand!(602), + 604 => $expand!(604), + 606 => $expand!(606), + 608 => $expand!(608), + 610 => $expand!(610), + 612 => $expand!(612), + 614 => $expand!(614), + 616 => $expand!(616), + 618 => $expand!(618), + 620 => $expand!(620), + 622 => $expand!(622), + 624 => $expand!(624), + 626 => $expand!(626), + 628 => $expand!(628), + 630 => $expand!(630), + 632 => $expand!(632), + 634 => $expand!(634), + 636 => $expand!(636), + 638 => $expand!(638), + 640 => $expand!(640), + 642 => $expand!(642), + 644 => $expand!(644), + 646 => $expand!(646), + 648 => $expand!(648), + 650 => $expand!(650), + 652 => $expand!(652), + 654 => $expand!(654), + 656 => $expand!(656), + 658 => $expand!(658), + 660 => $expand!(660), + 662 => $expand!(662), + 664 => $expand!(664), + 666 => $expand!(666), + 668 => $expand!(668), + 670 => $expand!(670), + 672 => $expand!(672), + 674 => $expand!(674), + 676 => $expand!(676), + 678 => $expand!(678), + 680 => $expand!(680), + 682 => $expand!(682), + 684 => $expand!(684), + 686 => $expand!(686), + 688 => $expand!(688), + 690 => $expand!(690), + 692 => $expand!(692), + 694 => $expand!(694), + 696 => $expand!(696), + 698 => $expand!(698), + 700 => $expand!(700), + 702 => $expand!(702), + 704 => $expand!(704), + 706 => $expand!(706), + 708 => $expand!(708), + 710 => $expand!(710), + 712 => $expand!(712), + 714 => $expand!(714), + 716 => $expand!(716), + 718 => $expand!(718), + 720 => $expand!(720), + 722 => $expand!(722), + 724 => $expand!(724), + 726 => $expand!(726), + 728 => $expand!(728), + 730 => $expand!(730), + 732 => $expand!(732), + 734 => $expand!(734), + 736 => $expand!(736), + 738 => $expand!(738), + 740 => $expand!(740), + 742 => $expand!(742), + 744 => $expand!(744), + 746 => $expand!(746), + 748 => $expand!(748), + 750 => $expand!(750), + 752 => $expand!(752), + 754 => 
$expand!(754), + 756 => $expand!(756), + 758 => $expand!(758), + 760 => $expand!(760), + 762 => $expand!(762), + 764 => $expand!(764), + 766 => $expand!(766), + 768 => $expand!(768), + 770 => $expand!(770), + 772 => $expand!(772), + 774 => $expand!(774), + 776 => $expand!(776), + 778 => $expand!(778), + 780 => $expand!(780), + 782 => $expand!(782), + 784 => $expand!(784), + 786 => $expand!(786), + 788 => $expand!(788), + 790 => $expand!(790), + 792 => $expand!(792), + 794 => $expand!(794), + 796 => $expand!(796), + 798 => $expand!(798), + 800 => $expand!(800), + 802 => $expand!(802), + 804 => $expand!(804), + 806 => $expand!(806), + 808 => $expand!(808), + 810 => $expand!(810), + 812 => $expand!(812), + 814 => $expand!(814), + 816 => $expand!(816), + 818 => $expand!(818), + 820 => $expand!(820), + 822 => $expand!(822), + 824 => $expand!(824), + 826 => $expand!(826), + 828 => $expand!(828), + 830 => $expand!(830), + 832 => $expand!(832), + 834 => $expand!(834), + 836 => $expand!(836), + 838 => $expand!(838), + 840 => $expand!(840), + 842 => $expand!(842), + 844 => $expand!(844), + 846 => $expand!(846), + 848 => $expand!(848), + 850 => $expand!(850), + 852 => $expand!(852), + 854 => $expand!(854), + 856 => $expand!(856), + 858 => $expand!(858), + 860 => $expand!(860), + 862 => $expand!(862), + 864 => $expand!(864), + 866 => $expand!(866), + 868 => $expand!(868), + 870 => $expand!(870), + 872 => $expand!(872), + 874 => $expand!(874), + 876 => $expand!(876), + 878 => $expand!(878), + 880 => $expand!(880), + 882 => $expand!(882), + 884 => $expand!(884), + 886 => $expand!(886), + 888 => $expand!(888), + 890 => $expand!(890), + 892 => $expand!(892), + 894 => $expand!(894), + 896 => $expand!(896), + 898 => $expand!(898), + 900 => $expand!(900), + 902 => $expand!(902), + 904 => $expand!(904), + 906 => $expand!(906), + 908 => $expand!(908), + 910 => $expand!(910), + 912 => $expand!(912), + 914 => $expand!(914), + 916 => $expand!(916), + 918 => $expand!(918), + 920 => $expand!(920), + 922 => $expand!(922), + 924 => $expand!(924), + 926 => $expand!(926), + 928 => $expand!(928), + 930 => $expand!(930), + 932 => $expand!(932), + 934 => $expand!(934), + 936 => $expand!(936), + 938 => $expand!(938), + 940 => $expand!(940), + 942 => $expand!(942), + 944 => $expand!(944), + 946 => $expand!(946), + 948 => $expand!(948), + 950 => $expand!(950), + 952 => $expand!(952), + 954 => $expand!(954), + 956 => $expand!(956), + 958 => $expand!(958), + 960 => $expand!(960), + 962 => $expand!(962), + 964 => $expand!(964), + 966 => $expand!(966), + 968 => $expand!(968), + 970 => $expand!(970), + 972 => $expand!(972), + 974 => $expand!(974), + 976 => $expand!(976), + 978 => $expand!(978), + 980 => $expand!(980), + 982 => $expand!(982), + 984 => $expand!(984), + 986 => $expand!(986), + 988 => $expand!(988), + 990 => $expand!(990), + 992 => $expand!(992), + 994 => $expand!(994), + 996 => $expand!(996), + 998 => $expand!(998), + 1000 => $expand!(1000), + 1002 => $expand!(1002), + 1004 => $expand!(1004), + 1006 => $expand!(1006), + 1008 => $expand!(1008), + 1010 => $expand!(1010), + 1012 => $expand!(1012), + 1014 => $expand!(1014), + 1016 => $expand!(1016), + 1018 => $expand!(1018), + 1020 => $expand!(1020), + 1024 => $expand!(-1024), + 1026 => $expand!(-1022), + 1028 => $expand!(-1020), + 1030 => $expand!(-1018), + 1032 => $expand!(-1016), + 1034 => $expand!(-1014), + 1036 => $expand!(-1012), + 1038 => $expand!(-1010), + 1040 => $expand!(-1008), + 1042 => $expand!(-1006), + 1044 => $expand!(-1004), + 1046 => $expand!(-1002), + 
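// (negative half of the signed 11-bit range: each raw value n here expands to n - 2048, so offsets stay multiples of 2) +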
1048 => $expand!(-1000), + 1050 => $expand!(-998), + 1052 => $expand!(-996), + 1054 => $expand!(-994), + 1056 => $expand!(-992), + 1058 => $expand!(-990), + 1060 => $expand!(-988), + 1062 => $expand!(-986), + 1064 => $expand!(-984), + 1066 => $expand!(-982), + 1068 => $expand!(-980), + 1070 => $expand!(-978), + 1072 => $expand!(-976), + 1074 => $expand!(-974), + 1076 => $expand!(-972), + 1078 => $expand!(-970), + 1080 => $expand!(-968), + 1082 => $expand!(-966), + 1084 => $expand!(-964), + 1086 => $expand!(-962), + 1088 => $expand!(-960), + 1090 => $expand!(-958), + 1092 => $expand!(-956), + 1094 => $expand!(-954), + 1096 => $expand!(-952), + 1098 => $expand!(-950), + 1100 => $expand!(-948), + 1102 => $expand!(-946), + 1104 => $expand!(-944), + 1106 => $expand!(-942), + 1108 => $expand!(-940), + 1110 => $expand!(-938), + 1112 => $expand!(-936), + 1114 => $expand!(-934), + 1116 => $expand!(-932), + 1118 => $expand!(-930), + 1120 => $expand!(-928), + 1122 => $expand!(-926), + 1124 => $expand!(-924), + 1126 => $expand!(-922), + 1128 => $expand!(-920), + 1130 => $expand!(-918), + 1132 => $expand!(-916), + 1134 => $expand!(-914), + 1136 => $expand!(-912), + 1138 => $expand!(-910), + 1140 => $expand!(-908), + 1142 => $expand!(-906), + 1144 => $expand!(-904), + 1146 => $expand!(-902), + 1148 => $expand!(-900), + 1150 => $expand!(-898), + 1152 => $expand!(-896), + 1154 => $expand!(-894), + 1156 => $expand!(-892), + 1158 => $expand!(-890), + 1160 => $expand!(-888), + 1162 => $expand!(-886), + 1164 => $expand!(-884), + 1166 => $expand!(-882), + 1168 => $expand!(-880), + 1170 => $expand!(-878), + 1172 => $expand!(-876), + 1174 => $expand!(-874), + 1176 => $expand!(-872), + 1178 => $expand!(-870), + 1180 => $expand!(-868), + 1182 => $expand!(-866), + 1184 => $expand!(-864), + 1186 => $expand!(-862), + 1188 => $expand!(-860), + 1190 => $expand!(-858), + 1192 => $expand!(-856), + 1194 => $expand!(-854), + 1196 => $expand!(-852), + 1198 => $expand!(-850), + 1200 => $expand!(-848), + 1202 => $expand!(-846), + 1204 => $expand!(-844), + 1206 => $expand!(-842), + 1208 => $expand!(-840), + 1210 => $expand!(-838), + 1212 => $expand!(-836), + 1214 => $expand!(-834), + 1216 => $expand!(-832), + 1218 => $expand!(-830), + 1220 => $expand!(-828), + 1222 => $expand!(-826), + 1224 => $expand!(-824), + 1226 => $expand!(-822), + 1228 => $expand!(-820), + 1230 => $expand!(-818), + 1232 => $expand!(-816), + 1234 => $expand!(-814), + 1236 => $expand!(-812), + 1238 => $expand!(-810), + 1240 => $expand!(-808), + 1242 => $expand!(-806), + 1244 => $expand!(-804), + 1246 => $expand!(-802), + 1248 => $expand!(-800), + 1250 => $expand!(-798), + 1252 => $expand!(-796), + 1254 => $expand!(-794), + 1256 => $expand!(-792), + 1258 => $expand!(-790), + 1260 => $expand!(-788), + 1262 => $expand!(-786), + 1264 => $expand!(-784), + 1266 => $expand!(-782), + 1268 => $expand!(-780), + 1270 => $expand!(-778), + 1272 => $expand!(-776), + 1274 => $expand!(-774), + 1276 => $expand!(-772), + 1278 => $expand!(-770), + 1280 => $expand!(-768), + 1282 => $expand!(-766), + 1284 => $expand!(-764), + 1286 => $expand!(-762), + 1288 => $expand!(-760), + 1290 => $expand!(-758), + 1292 => $expand!(-756), + 1294 => $expand!(-754), + 1296 => $expand!(-752), + 1298 => $expand!(-750), + 1300 => $expand!(-748), + 1302 => $expand!(-746), + 1304 => $expand!(-744), + 1306 => $expand!(-742), + 1308 => $expand!(-740), + 1310 => $expand!(-738), + 1312 => $expand!(-736), + 1314 => $expand!(-734), + 1316 => $expand!(-732), + 1318 => $expand!(-730), + 1320 => 
$expand!(-728), + 1322 => $expand!(-726), + 1324 => $expand!(-724), + 1326 => $expand!(-722), + 1328 => $expand!(-720), + 1330 => $expand!(-718), + 1332 => $expand!(-716), + 1334 => $expand!(-714), + 1336 => $expand!(-712), + 1338 => $expand!(-710), + 1340 => $expand!(-708), + 1342 => $expand!(-706), + 1344 => $expand!(-704), + 1346 => $expand!(-702), + 1348 => $expand!(-700), + 1350 => $expand!(-698), + 1352 => $expand!(-696), + 1354 => $expand!(-694), + 1356 => $expand!(-692), + 1358 => $expand!(-690), + 1360 => $expand!(-688), + 1362 => $expand!(-686), + 1364 => $expand!(-684), + 1366 => $expand!(-682), + 1368 => $expand!(-680), + 1370 => $expand!(-678), + 1372 => $expand!(-676), + 1374 => $expand!(-674), + 1376 => $expand!(-672), + 1378 => $expand!(-670), + 1380 => $expand!(-668), + 1382 => $expand!(-666), + 1384 => $expand!(-664), + 1386 => $expand!(-662), + 1388 => $expand!(-660), + 1390 => $expand!(-658), + 1392 => $expand!(-656), + 1394 => $expand!(-654), + 1396 => $expand!(-652), + 1398 => $expand!(-650), + 1400 => $expand!(-648), + 1402 => $expand!(-646), + 1404 => $expand!(-644), + 1406 => $expand!(-642), + 1408 => $expand!(-640), + 1410 => $expand!(-638), + 1412 => $expand!(-636), + 1414 => $expand!(-634), + 1416 => $expand!(-632), + 1418 => $expand!(-630), + 1420 => $expand!(-628), + 1422 => $expand!(-626), + 1424 => $expand!(-624), + 1426 => $expand!(-622), + 1428 => $expand!(-620), + 1430 => $expand!(-618), + 1432 => $expand!(-616), + 1434 => $expand!(-614), + 1436 => $expand!(-612), + 1438 => $expand!(-610), + 1440 => $expand!(-608), + 1442 => $expand!(-606), + 1444 => $expand!(-604), + 1446 => $expand!(-602), + 1448 => $expand!(-600), + 1450 => $expand!(-598), + 1452 => $expand!(-596), + 1454 => $expand!(-594), + 1456 => $expand!(-592), + 1458 => $expand!(-590), + 1460 => $expand!(-588), + 1462 => $expand!(-586), + 1464 => $expand!(-584), + 1466 => $expand!(-582), + 1468 => $expand!(-580), + 1470 => $expand!(-578), + 1472 => $expand!(-576), + 1474 => $expand!(-574), + 1476 => $expand!(-572), + 1478 => $expand!(-570), + 1480 => $expand!(-568), + 1482 => $expand!(-566), + 1484 => $expand!(-564), + 1486 => $expand!(-562), + 1488 => $expand!(-560), + 1490 => $expand!(-558), + 1492 => $expand!(-556), + 1494 => $expand!(-554), + 1496 => $expand!(-552), + 1498 => $expand!(-550), + 1500 => $expand!(-548), + 1502 => $expand!(-546), + 1504 => $expand!(-544), + 1506 => $expand!(-542), + 1508 => $expand!(-540), + 1510 => $expand!(-538), + 1512 => $expand!(-536), + 1514 => $expand!(-534), + 1516 => $expand!(-532), + 1518 => $expand!(-530), + 1520 => $expand!(-528), + 1522 => $expand!(-526), + 1524 => $expand!(-524), + 1526 => $expand!(-522), + 1528 => $expand!(-520), + 1530 => $expand!(-518), + 1532 => $expand!(-516), + 1534 => $expand!(-514), + 1536 => $expand!(-512), + 1538 => $expand!(-510), + 1540 => $expand!(-508), + 1542 => $expand!(-506), + 1544 => $expand!(-504), + 1546 => $expand!(-502), + 1548 => $expand!(-500), + 1550 => $expand!(-498), + 1552 => $expand!(-496), + 1554 => $expand!(-494), + 1556 => $expand!(-492), + 1558 => $expand!(-490), + 1560 => $expand!(-488), + 1562 => $expand!(-486), + 1564 => $expand!(-484), + 1566 => $expand!(-482), + 1568 => $expand!(-480), + 1570 => $expand!(-478), + 1572 => $expand!(-476), + 1574 => $expand!(-474), + 1576 => $expand!(-472), + 1578 => $expand!(-470), + 1580 => $expand!(-468), + 1582 => $expand!(-466), + 1584 => $expand!(-464), + 1586 => $expand!(-462), + 1588 => $expand!(-460), + 1590 => $expand!(-458), + 1592 => $expand!(-456), + 
1594 => $expand!(-454), + 1596 => $expand!(-452), + 1598 => $expand!(-450), + 1600 => $expand!(-448), + 1602 => $expand!(-446), + 1604 => $expand!(-444), + 1606 => $expand!(-442), + 1608 => $expand!(-440), + 1610 => $expand!(-438), + 1612 => $expand!(-436), + 1614 => $expand!(-434), + 1616 => $expand!(-432), + 1618 => $expand!(-430), + 1620 => $expand!(-428), + 1622 => $expand!(-426), + 1624 => $expand!(-424), + 1626 => $expand!(-422), + 1628 => $expand!(-420), + 1630 => $expand!(-418), + 1632 => $expand!(-416), + 1634 => $expand!(-414), + 1636 => $expand!(-412), + 1638 => $expand!(-410), + 1640 => $expand!(-408), + 1642 => $expand!(-406), + 1644 => $expand!(-404), + 1646 => $expand!(-402), + 1648 => $expand!(-400), + 1650 => $expand!(-398), + 1652 => $expand!(-396), + 1654 => $expand!(-394), + 1656 => $expand!(-392), + 1658 => $expand!(-390), + 1660 => $expand!(-388), + 1662 => $expand!(-386), + 1664 => $expand!(-384), + 1666 => $expand!(-382), + 1668 => $expand!(-380), + 1670 => $expand!(-378), + 1672 => $expand!(-376), + 1674 => $expand!(-374), + 1676 => $expand!(-372), + 1678 => $expand!(-370), + 1680 => $expand!(-368), + 1682 => $expand!(-366), + 1684 => $expand!(-364), + 1686 => $expand!(-362), + 1688 => $expand!(-360), + 1690 => $expand!(-358), + 1692 => $expand!(-356), + 1694 => $expand!(-354), + 1696 => $expand!(-352), + 1698 => $expand!(-350), + 1700 => $expand!(-348), + 1702 => $expand!(-346), + 1704 => $expand!(-344), + 1706 => $expand!(-342), + 1708 => $expand!(-340), + 1710 => $expand!(-338), + 1712 => $expand!(-336), + 1714 => $expand!(-334), + 1716 => $expand!(-332), + 1718 => $expand!(-330), + 1720 => $expand!(-328), + 1722 => $expand!(-326), + 1724 => $expand!(-324), + 1726 => $expand!(-322), + 1728 => $expand!(-320), + 1730 => $expand!(-318), + 1732 => $expand!(-316), + 1734 => $expand!(-314), + 1736 => $expand!(-312), + 1738 => $expand!(-310), + 1740 => $expand!(-308), + 1742 => $expand!(-306), + 1744 => $expand!(-304), + 1746 => $expand!(-302), + 1748 => $expand!(-300), + 1750 => $expand!(-298), + 1752 => $expand!(-296), + 1754 => $expand!(-294), + 1756 => $expand!(-292), + 1758 => $expand!(-290), + 1760 => $expand!(-288), + 1762 => $expand!(-286), + 1764 => $expand!(-284), + 1766 => $expand!(-282), + 1768 => $expand!(-280), + 1770 => $expand!(-278), + 1772 => $expand!(-276), + 1774 => $expand!(-274), + 1776 => $expand!(-272), + 1778 => $expand!(-270), + 1780 => $expand!(-268), + 1782 => $expand!(-266), + 1784 => $expand!(-264), + 1786 => $expand!(-262), + 1788 => $expand!(-260), + 1790 => $expand!(-258), + 1792 => $expand!(-256), + 1794 => $expand!(-254), + 1796 => $expand!(-252), + 1798 => $expand!(-250), + 1800 => $expand!(-248), + 1802 => $expand!(-246), + 1804 => $expand!(-244), + 1806 => $expand!(-242), + 1808 => $expand!(-240), + 1810 => $expand!(-238), + 1812 => $expand!(-236), + 1814 => $expand!(-234), + 1816 => $expand!(-232), + 1818 => $expand!(-230), + 1820 => $expand!(-228), + 1822 => $expand!(-226), + 1824 => $expand!(-224), + 1826 => $expand!(-222), + 1828 => $expand!(-220), + 1830 => $expand!(-218), + 1832 => $expand!(-216), + 1834 => $expand!(-214), + 1836 => $expand!(-212), + 1838 => $expand!(-210), + 1840 => $expand!(-208), + 1842 => $expand!(-206), + 1844 => $expand!(-204), + 1846 => $expand!(-202), + 1848 => $expand!(-200), + 1850 => $expand!(-198), + 1852 => $expand!(-196), + 1854 => $expand!(-194), + 1856 => $expand!(-192), + 1858 => $expand!(-190), + 1860 => $expand!(-188), + 1862 => $expand!(-186), + 1864 => $expand!(-184), + 1866 => 
$expand!(-182), + 1868 => $expand!(-180), + 1870 => $expand!(-178), + 1872 => $expand!(-176), + 1874 => $expand!(-174), + 1876 => $expand!(-172), + 1878 => $expand!(-170), + 1880 => $expand!(-168), + 1882 => $expand!(-166), + 1884 => $expand!(-164), + 1886 => $expand!(-162), + 1888 => $expand!(-160), + 1890 => $expand!(-158), + 1892 => $expand!(-156), + 1894 => $expand!(-154), + 1896 => $expand!(-152), + 1898 => $expand!(-150), + 1900 => $expand!(-148), + 1902 => $expand!(-146), + 1904 => $expand!(-144), + 1906 => $expand!(-142), + 1908 => $expand!(-140), + 1910 => $expand!(-138), + 1912 => $expand!(-136), + 1914 => $expand!(-134), + 1916 => $expand!(-132), + 1918 => $expand!(-130), + 1920 => $expand!(-128), + 1922 => $expand!(-126), + 1924 => $expand!(-124), + 1926 => $expand!(-122), + 1928 => $expand!(-120), + 1930 => $expand!(-118), + 1932 => $expand!(-116), + 1934 => $expand!(-114), + 1936 => $expand!(-112), + 1938 => $expand!(-110), + 1940 => $expand!(-108), + 1942 => $expand!(-106), + 1944 => $expand!(-104), + 1946 => $expand!(-102), + 1948 => $expand!(-100), + 1950 => $expand!(-98), + 1952 => $expand!(-96), + 1954 => $expand!(-94), + 1956 => $expand!(-92), + 1958 => $expand!(-90), + 1960 => $expand!(-88), + 1962 => $expand!(-86), + 1964 => $expand!(-84), + 1966 => $expand!(-82), + 1968 => $expand!(-80), + 1970 => $expand!(-78), + 1972 => $expand!(-76), + 1974 => $expand!(-74), + 1976 => $expand!(-72), + 1978 => $expand!(-70), + 1980 => $expand!(-68), + 1982 => $expand!(-66), + 1984 => $expand!(-64), + 1986 => $expand!(-62), + 1988 => $expand!(-60), + 1990 => $expand!(-58), + 1992 => $expand!(-56), + 1994 => $expand!(-54), + 1996 => $expand!(-52), + 1998 => $expand!(-50), + 2000 => $expand!(-48), + 2002 => $expand!(-46), + 2004 => $expand!(-44), + 2006 => $expand!(-42), + 2008 => $expand!(-40), + 2010 => $expand!(-38), + 2012 => $expand!(-36), + 2014 => $expand!(-34), + 2016 => $expand!(-32), + 2018 => $expand!(-30), + 2020 => $expand!(-28), + 2022 => $expand!(-26), + 2024 => $expand!(-24), + 2026 => $expand!(-22), + 2028 => $expand!(-20), + 2030 => $expand!(-18), + 2032 => $expand!(-16), + 2034 => $expand!(-14), + 2036 => $expand!(-12), + 2038 => $expand!(-10), + 2040 => $expand!(-8), + 2042 => $expand!(-6), + 2044 => $expand!(-4), + 2046 => $expand!(-2), + _ => $expand!(1022) + } + }; +} + + +macro_rules! 
constify_imm_s10 { + ($imm_s10:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm_s10) & 0b11_1111_1111 { + 0 => $expand!(0), + 1 => $expand!(1), + 2 => $expand!(2), + 3 => $expand!(3), + 4 => $expand!(4), + 5 => $expand!(5), + 6 => $expand!(6), + 7 => $expand!(7), + 8 => $expand!(8), + 9 => $expand!(9), + 10 => $expand!(10), + 11 => $expand!(11), + 12 => $expand!(12), + 13 => $expand!(13), + 14 => $expand!(14), + 15 => $expand!(15), + 16 => $expand!(16), + 17 => $expand!(17), + 18 => $expand!(18), + 19 => $expand!(19), + 20 => $expand!(20), + 21 => $expand!(21), + 22 => $expand!(22), + 23 => $expand!(23), + 24 => $expand!(24), + 25 => $expand!(25), + 26 => $expand!(26), + 27 => $expand!(27), + 28 => $expand!(28), + 29 => $expand!(29), + 30 => $expand!(30), + 31 => $expand!(31), + 32 => $expand!(32), + 33 => $expand!(33), + 34 => $expand!(34), + 35 => $expand!(35), + 36 => $expand!(36), + 37 => $expand!(37), + 38 => $expand!(38), + 39 => $expand!(39), + 40 => $expand!(40), + 41 => $expand!(41), + 42 => $expand!(42), + 43 => $expand!(43), + 44 => $expand!(44), + 45 => $expand!(45), + 46 => $expand!(46), + 47 => $expand!(47), + 48 => $expand!(48), + 49 => $expand!(49), + 50 => $expand!(50), + 51 => $expand!(51), + 52 => $expand!(52), + 53 => $expand!(53), + 54 => $expand!(54), + 55 => $expand!(55), + 56 => $expand!(56), + 57 => $expand!(57), + 58 => $expand!(58), + 59 => $expand!(59), + 60 => $expand!(60), + 61 => $expand!(61), + 62 => $expand!(62), + 63 => $expand!(63), + 64 => $expand!(64), + 65 => $expand!(65), + 66 => $expand!(66), + 67 => $expand!(67), + 68 => $expand!(68), + 69 => $expand!(69), + 70 => $expand!(70), + 71 => $expand!(71), + 72 => $expand!(72), + 73 => $expand!(73), + 74 => $expand!(74), + 75 => $expand!(75), + 76 => $expand!(76), + 77 => $expand!(77), + 78 => $expand!(78), + 79 => $expand!(79), + 80 => $expand!(80), + 81 => $expand!(81), + 82 => $expand!(82), + 83 => $expand!(83), + 84 => $expand!(84), + 85 => $expand!(85), + 86 => $expand!(86), + 87 => $expand!(87), + 88 => $expand!(88), + 89 => $expand!(89), + 90 => $expand!(90), + 91 => $expand!(91), + 92 => $expand!(92), + 93 => $expand!(93), + 94 => $expand!(94), + 95 => $expand!(95), + 96 => $expand!(96), + 97 => $expand!(97), + 98 => $expand!(98), + 99 => $expand!(99), + 100 => $expand!(100), + 101 => $expand!(101), + 102 => $expand!(102), + 103 => $expand!(103), + 104 => $expand!(104), + 105 => $expand!(105), + 106 => $expand!(106), + 107 => $expand!(107), + 108 => $expand!(108), + 109 => $expand!(109), + 110 => $expand!(110), + 111 => $expand!(111), + 112 => $expand!(112), + 113 => $expand!(113), + 114 => $expand!(114), + 115 => $expand!(115), + 116 => $expand!(116), + 117 => $expand!(117), + 118 => $expand!(118), + 119 => $expand!(119), + 120 => $expand!(120), + 121 => $expand!(121), + 122 => $expand!(122), + 123 => $expand!(123), + 124 => $expand!(124), + 125 => $expand!(125), + 126 => $expand!(126), + 127 => $expand!(127), + 128 => $expand!(128), + 129 => $expand!(129), + 130 => $expand!(130), + 131 => $expand!(131), + 132 => $expand!(132), + 133 => $expand!(133), + 134 => $expand!(134), + 135 => $expand!(135), + 136 => $expand!(136), + 137 => $expand!(137), + 138 => $expand!(138), + 139 => $expand!(139), + 140 => $expand!(140), + 141 => $expand!(141), + 142 => $expand!(142), + 143 => $expand!(143), + 144 => $expand!(144), + 145 => $expand!(145), + 146 => $expand!(146), + 147 => $expand!(147), + 148 => $expand!(148), + 149 => $expand!(149), + 150 => $expand!(150), + 151 => 
$expand!(151), + 152 => $expand!(152), + 153 => $expand!(153), + 154 => $expand!(154), + 155 => $expand!(155), + 156 => $expand!(156), + 157 => $expand!(157), + 158 => $expand!(158), + 159 => $expand!(159), + 160 => $expand!(160), + 161 => $expand!(161), + 162 => $expand!(162), + 163 => $expand!(163), + 164 => $expand!(164), + 165 => $expand!(165), + 166 => $expand!(166), + 167 => $expand!(167), + 168 => $expand!(168), + 169 => $expand!(169), + 170 => $expand!(170), + 171 => $expand!(171), + 172 => $expand!(172), + 173 => $expand!(173), + 174 => $expand!(174), + 175 => $expand!(175), + 176 => $expand!(176), + 177 => $expand!(177), + 178 => $expand!(178), + 179 => $expand!(179), + 180 => $expand!(180), + 181 => $expand!(181), + 182 => $expand!(182), + 183 => $expand!(183), + 184 => $expand!(184), + 185 => $expand!(185), + 186 => $expand!(186), + 187 => $expand!(187), + 188 => $expand!(188), + 189 => $expand!(189), + 190 => $expand!(190), + 191 => $expand!(191), + 192 => $expand!(192), + 193 => $expand!(193), + 194 => $expand!(194), + 195 => $expand!(195), + 196 => $expand!(196), + 197 => $expand!(197), + 198 => $expand!(198), + 199 => $expand!(199), + 200 => $expand!(200), + 201 => $expand!(201), + 202 => $expand!(202), + 203 => $expand!(203), + 204 => $expand!(204), + 205 => $expand!(205), + 206 => $expand!(206), + 207 => $expand!(207), + 208 => $expand!(208), + 209 => $expand!(209), + 210 => $expand!(210), + 211 => $expand!(211), + 212 => $expand!(212), + 213 => $expand!(213), + 214 => $expand!(214), + 215 => $expand!(215), + 216 => $expand!(216), + 217 => $expand!(217), + 218 => $expand!(218), + 219 => $expand!(219), + 220 => $expand!(220), + 221 => $expand!(221), + 222 => $expand!(222), + 223 => $expand!(223), + 224 => $expand!(224), + 225 => $expand!(225), + 226 => $expand!(226), + 227 => $expand!(227), + 228 => $expand!(228), + 229 => $expand!(229), + 230 => $expand!(230), + 231 => $expand!(231), + 232 => $expand!(232), + 233 => $expand!(233), + 234 => $expand!(234), + 235 => $expand!(235), + 236 => $expand!(236), + 237 => $expand!(237), + 238 => $expand!(238), + 239 => $expand!(239), + 240 => $expand!(240), + 241 => $expand!(241), + 242 => $expand!(242), + 243 => $expand!(243), + 244 => $expand!(244), + 245 => $expand!(245), + 246 => $expand!(246), + 247 => $expand!(247), + 248 => $expand!(248), + 249 => $expand!(249), + 250 => $expand!(250), + 251 => $expand!(251), + 252 => $expand!(252), + 253 => $expand!(253), + 254 => $expand!(254), + 255 => $expand!(255), + 256 => $expand!(256), + 257 => $expand!(257), + 258 => $expand!(258), + 259 => $expand!(259), + 260 => $expand!(260), + 261 => $expand!(261), + 262 => $expand!(262), + 263 => $expand!(263), + 264 => $expand!(264), + 265 => $expand!(265), + 266 => $expand!(266), + 267 => $expand!(267), + 268 => $expand!(268), + 269 => $expand!(269), + 270 => $expand!(270), + 271 => $expand!(271), + 272 => $expand!(272), + 273 => $expand!(273), + 274 => $expand!(274), + 275 => $expand!(275), + 276 => $expand!(276), + 277 => $expand!(277), + 278 => $expand!(278), + 279 => $expand!(279), + 280 => $expand!(280), + 281 => $expand!(281), + 282 => $expand!(282), + 283 => $expand!(283), + 284 => $expand!(284), + 285 => $expand!(285), + 286 => $expand!(286), + 287 => $expand!(287), + 288 => $expand!(288), + 289 => $expand!(289), + 290 => $expand!(290), + 291 => $expand!(291), + 292 => $expand!(292), + 293 => $expand!(293), + 294 => $expand!(294), + 295 => $expand!(295), + 296 => $expand!(296), + 297 => $expand!(297), + 298 => $expand!(298), + 299 => 
$expand!(299), + 300 => $expand!(300), + 301 => $expand!(301), + 302 => $expand!(302), + 303 => $expand!(303), + 304 => $expand!(304), + 305 => $expand!(305), + 306 => $expand!(306), + 307 => $expand!(307), + 308 => $expand!(308), + 309 => $expand!(309), + 310 => $expand!(310), + 311 => $expand!(311), + 312 => $expand!(312), + 313 => $expand!(313), + 314 => $expand!(314), + 315 => $expand!(315), + 316 => $expand!(316), + 317 => $expand!(317), + 318 => $expand!(318), + 319 => $expand!(319), + 320 => $expand!(320), + 321 => $expand!(321), + 322 => $expand!(322), + 323 => $expand!(323), + 324 => $expand!(324), + 325 => $expand!(325), + 326 => $expand!(326), + 327 => $expand!(327), + 328 => $expand!(328), + 329 => $expand!(329), + 330 => $expand!(330), + 331 => $expand!(331), + 332 => $expand!(332), + 333 => $expand!(333), + 334 => $expand!(334), + 335 => $expand!(335), + 336 => $expand!(336), + 337 => $expand!(337), + 338 => $expand!(338), + 339 => $expand!(339), + 340 => $expand!(340), + 341 => $expand!(341), + 342 => $expand!(342), + 343 => $expand!(343), + 344 => $expand!(344), + 345 => $expand!(345), + 346 => $expand!(346), + 347 => $expand!(347), + 348 => $expand!(348), + 349 => $expand!(349), + 350 => $expand!(350), + 351 => $expand!(351), + 352 => $expand!(352), + 353 => $expand!(353), + 354 => $expand!(354), + 355 => $expand!(355), + 356 => $expand!(356), + 357 => $expand!(357), + 358 => $expand!(358), + 359 => $expand!(359), + 360 => $expand!(360), + 361 => $expand!(361), + 362 => $expand!(362), + 363 => $expand!(363), + 364 => $expand!(364), + 365 => $expand!(365), + 366 => $expand!(366), + 367 => $expand!(367), + 368 => $expand!(368), + 369 => $expand!(369), + 370 => $expand!(370), + 371 => $expand!(371), + 372 => $expand!(372), + 373 => $expand!(373), + 374 => $expand!(374), + 375 => $expand!(375), + 376 => $expand!(376), + 377 => $expand!(377), + 378 => $expand!(378), + 379 => $expand!(379), + 380 => $expand!(380), + 381 => $expand!(381), + 382 => $expand!(382), + 383 => $expand!(383), + 384 => $expand!(384), + 385 => $expand!(385), + 386 => $expand!(386), + 387 => $expand!(387), + 388 => $expand!(388), + 389 => $expand!(389), + 390 => $expand!(390), + 391 => $expand!(391), + 392 => $expand!(392), + 393 => $expand!(393), + 394 => $expand!(394), + 395 => $expand!(395), + 396 => $expand!(396), + 397 => $expand!(397), + 398 => $expand!(398), + 399 => $expand!(399), + 400 => $expand!(400), + 401 => $expand!(401), + 402 => $expand!(402), + 403 => $expand!(403), + 404 => $expand!(404), + 405 => $expand!(405), + 406 => $expand!(406), + 407 => $expand!(407), + 408 => $expand!(408), + 409 => $expand!(409), + 410 => $expand!(410), + 411 => $expand!(411), + 412 => $expand!(412), + 413 => $expand!(413), + 414 => $expand!(414), + 415 => $expand!(415), + 416 => $expand!(416), + 417 => $expand!(417), + 418 => $expand!(418), + 419 => $expand!(419), + 420 => $expand!(420), + 421 => $expand!(421), + 422 => $expand!(422), + 423 => $expand!(423), + 424 => $expand!(424), + 425 => $expand!(425), + 426 => $expand!(426), + 427 => $expand!(427), + 428 => $expand!(428), + 429 => $expand!(429), + 430 => $expand!(430), + 431 => $expand!(431), + 432 => $expand!(432), + 433 => $expand!(433), + 434 => $expand!(434), + 435 => $expand!(435), + 436 => $expand!(436), + 437 => $expand!(437), + 438 => $expand!(438), + 439 => $expand!(439), + 440 => $expand!(440), + 441 => $expand!(441), + 442 => $expand!(442), + 443 => $expand!(443), + 444 => $expand!(444), + 445 => $expand!(445), + 446 => $expand!(446), + 447 => 
$expand!(447), + 448 => $expand!(448), + 449 => $expand!(449), + 450 => $expand!(450), + 451 => $expand!(451), + 452 => $expand!(452), + 453 => $expand!(453), + 454 => $expand!(454), + 455 => $expand!(455), + 456 => $expand!(456), + 457 => $expand!(457), + 458 => $expand!(458), + 459 => $expand!(459), + 460 => $expand!(460), + 461 => $expand!(461), + 462 => $expand!(462), + 463 => $expand!(463), + 464 => $expand!(464), + 465 => $expand!(465), + 466 => $expand!(466), + 467 => $expand!(467), + 468 => $expand!(468), + 469 => $expand!(469), + 470 => $expand!(470), + 471 => $expand!(471), + 472 => $expand!(472), + 473 => $expand!(473), + 474 => $expand!(474), + 475 => $expand!(475), + 476 => $expand!(476), + 477 => $expand!(477), + 478 => $expand!(478), + 479 => $expand!(479), + 480 => $expand!(480), + 481 => $expand!(481), + 482 => $expand!(482), + 483 => $expand!(483), + 484 => $expand!(484), + 485 => $expand!(485), + 486 => $expand!(486), + 487 => $expand!(487), + 488 => $expand!(488), + 489 => $expand!(489), + 490 => $expand!(490), + 491 => $expand!(491), + 492 => $expand!(492), + 493 => $expand!(493), + 494 => $expand!(494), + 495 => $expand!(495), + 496 => $expand!(496), + 497 => $expand!(497), + 498 => $expand!(498), + 499 => $expand!(499), + 500 => $expand!(500), + 501 => $expand!(501), + 502 => $expand!(502), + 503 => $expand!(503), + 504 => $expand!(504), + 505 => $expand!(505), + 506 => $expand!(506), + 507 => $expand!(507), + 508 => $expand!(508), + 509 => $expand!(509), + 510 => $expand!(510), + 512 => $expand!(-512), + 513 => $expand!(-511), + 514 => $expand!(-510), + 515 => $expand!(-509), + 516 => $expand!(-508), + 517 => $expand!(-507), + 518 => $expand!(-506), + 519 => $expand!(-505), + 520 => $expand!(-504), + 521 => $expand!(-503), + 522 => $expand!(-502), + 523 => $expand!(-501), + 524 => $expand!(-500), + 525 => $expand!(-499), + 526 => $expand!(-498), + 527 => $expand!(-497), + 528 => $expand!(-496), + 529 => $expand!(-495), + 530 => $expand!(-494), + 531 => $expand!(-493), + 532 => $expand!(-492), + 533 => $expand!(-491), + 534 => $expand!(-490), + 535 => $expand!(-489), + 536 => $expand!(-488), + 537 => $expand!(-487), + 538 => $expand!(-486), + 539 => $expand!(-485), + 540 => $expand!(-484), + 541 => $expand!(-483), + 542 => $expand!(-482), + 543 => $expand!(-481), + 544 => $expand!(-480), + 545 => $expand!(-479), + 546 => $expand!(-478), + 547 => $expand!(-477), + 548 => $expand!(-476), + 549 => $expand!(-475), + 550 => $expand!(-474), + 551 => $expand!(-473), + 552 => $expand!(-472), + 553 => $expand!(-471), + 554 => $expand!(-470), + 555 => $expand!(-469), + 556 => $expand!(-468), + 557 => $expand!(-467), + 558 => $expand!(-466), + 559 => $expand!(-465), + 560 => $expand!(-464), + 561 => $expand!(-463), + 562 => $expand!(-462), + 563 => $expand!(-461), + 564 => $expand!(-460), + 565 => $expand!(-459), + 566 => $expand!(-458), + 567 => $expand!(-457), + 568 => $expand!(-456), + 569 => $expand!(-455), + 570 => $expand!(-454), + 571 => $expand!(-453), + 572 => $expand!(-452), + 573 => $expand!(-451), + 574 => $expand!(-450), + 575 => $expand!(-449), + 576 => $expand!(-448), + 577 => $expand!(-447), + 578 => $expand!(-446), + 579 => $expand!(-445), + 580 => $expand!(-444), + 581 => $expand!(-443), + 582 => $expand!(-442), + 583 => $expand!(-441), + 584 => $expand!(-440), + 585 => $expand!(-439), + 586 => $expand!(-438), + 587 => $expand!(-437), + 588 => $expand!(-436), + 589 => $expand!(-435), + 590 => $expand!(-434), + 591 => $expand!(-433), + 592 => $expand!(-432), + 
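// (negative half of the signed 10-bit range: each raw value n here expands to n - 1024) +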
593 => $expand!(-431), + 594 => $expand!(-430), + 595 => $expand!(-429), + 596 => $expand!(-428), + 597 => $expand!(-427), + 598 => $expand!(-426), + 599 => $expand!(-425), + 600 => $expand!(-424), + 601 => $expand!(-423), + 602 => $expand!(-422), + 603 => $expand!(-421), + 604 => $expand!(-420), + 605 => $expand!(-419), + 606 => $expand!(-418), + 607 => $expand!(-417), + 608 => $expand!(-416), + 609 => $expand!(-415), + 610 => $expand!(-414), + 611 => $expand!(-413), + 612 => $expand!(-412), + 613 => $expand!(-411), + 614 => $expand!(-410), + 615 => $expand!(-409), + 616 => $expand!(-408), + 617 => $expand!(-407), + 618 => $expand!(-406), + 619 => $expand!(-405), + 620 => $expand!(-404), + 621 => $expand!(-403), + 622 => $expand!(-402), + 623 => $expand!(-401), + 624 => $expand!(-400), + 625 => $expand!(-399), + 626 => $expand!(-398), + 627 => $expand!(-397), + 628 => $expand!(-396), + 629 => $expand!(-395), + 630 => $expand!(-394), + 631 => $expand!(-393), + 632 => $expand!(-392), + 633 => $expand!(-391), + 634 => $expand!(-390), + 635 => $expand!(-389), + 636 => $expand!(-388), + 637 => $expand!(-387), + 638 => $expand!(-386), + 639 => $expand!(-385), + 640 => $expand!(-384), + 641 => $expand!(-383), + 642 => $expand!(-382), + 643 => $expand!(-381), + 644 => $expand!(-380), + 645 => $expand!(-379), + 646 => $expand!(-378), + 647 => $expand!(-377), + 648 => $expand!(-376), + 649 => $expand!(-375), + 650 => $expand!(-374), + 651 => $expand!(-373), + 652 => $expand!(-372), + 653 => $expand!(-371), + 654 => $expand!(-370), + 655 => $expand!(-369), + 656 => $expand!(-368), + 657 => $expand!(-367), + 658 => $expand!(-366), + 659 => $expand!(-365), + 660 => $expand!(-364), + 661 => $expand!(-363), + 662 => $expand!(-362), + 663 => $expand!(-361), + 664 => $expand!(-360), + 665 => $expand!(-359), + 666 => $expand!(-358), + 667 => $expand!(-357), + 668 => $expand!(-356), + 669 => $expand!(-355), + 670 => $expand!(-354), + 671 => $expand!(-353), + 672 => $expand!(-352), + 673 => $expand!(-351), + 674 => $expand!(-350), + 675 => $expand!(-349), + 676 => $expand!(-348), + 677 => $expand!(-347), + 678 => $expand!(-346), + 679 => $expand!(-345), + 680 => $expand!(-344), + 681 => $expand!(-343), + 682 => $expand!(-342), + 683 => $expand!(-341), + 684 => $expand!(-340), + 685 => $expand!(-339), + 686 => $expand!(-338), + 687 => $expand!(-337), + 688 => $expand!(-336), + 689 => $expand!(-335), + 690 => $expand!(-334), + 691 => $expand!(-333), + 692 => $expand!(-332), + 693 => $expand!(-331), + 694 => $expand!(-330), + 695 => $expand!(-329), + 696 => $expand!(-328), + 697 => $expand!(-327), + 698 => $expand!(-326), + 699 => $expand!(-325), + 700 => $expand!(-324), + 701 => $expand!(-323), + 702 => $expand!(-322), + 703 => $expand!(-321), + 704 => $expand!(-320), + 705 => $expand!(-319), + 706 => $expand!(-318), + 707 => $expand!(-317), + 708 => $expand!(-316), + 709 => $expand!(-315), + 710 => $expand!(-314), + 711 => $expand!(-313), + 712 => $expand!(-312), + 713 => $expand!(-311), + 714 => $expand!(-310), + 715 => $expand!(-309), + 716 => $expand!(-308), + 717 => $expand!(-307), + 718 => $expand!(-306), + 719 => $expand!(-305), + 720 => $expand!(-304), + 721 => $expand!(-303), + 722 => $expand!(-302), + 723 => $expand!(-301), + 724 => $expand!(-300), + 725 => $expand!(-299), + 726 => $expand!(-298), + 727 => $expand!(-297), + 728 => $expand!(-296), + 729 => $expand!(-295), + 730 => $expand!(-294), + 731 => $expand!(-293), + 732 => $expand!(-292), + 733 => $expand!(-291), + 734 => $expand!(-290), + 735 
=> $expand!(-289), + 736 => $expand!(-288), + 737 => $expand!(-287), + 738 => $expand!(-286), + 739 => $expand!(-285), + 740 => $expand!(-284), + 741 => $expand!(-283), + 742 => $expand!(-282), + 743 => $expand!(-281), + 744 => $expand!(-280), + 745 => $expand!(-279), + 746 => $expand!(-278), + 747 => $expand!(-277), + 748 => $expand!(-276), + 749 => $expand!(-275), + 750 => $expand!(-274), + 751 => $expand!(-273), + 752 => $expand!(-272), + 753 => $expand!(-271), + 754 => $expand!(-270), + 755 => $expand!(-269), + 756 => $expand!(-268), + 757 => $expand!(-267), + 758 => $expand!(-266), + 759 => $expand!(-265), + 760 => $expand!(-264), + 761 => $expand!(-263), + 762 => $expand!(-262), + 763 => $expand!(-261), + 764 => $expand!(-260), + 765 => $expand!(-259), + 766 => $expand!(-258), + 767 => $expand!(-257), + 768 => $expand!(-256), + 769 => $expand!(-255), + 770 => $expand!(-254), + 771 => $expand!(-253), + 772 => $expand!(-252), + 773 => $expand!(-251), + 774 => $expand!(-250), + 775 => $expand!(-249), + 776 => $expand!(-248), + 777 => $expand!(-247), + 778 => $expand!(-246), + 779 => $expand!(-245), + 780 => $expand!(-244), + 781 => $expand!(-243), + 782 => $expand!(-242), + 783 => $expand!(-241), + 784 => $expand!(-240), + 785 => $expand!(-239), + 786 => $expand!(-238), + 787 => $expand!(-237), + 788 => $expand!(-236), + 789 => $expand!(-235), + 790 => $expand!(-234), + 791 => $expand!(-233), + 792 => $expand!(-232), + 793 => $expand!(-231), + 794 => $expand!(-230), + 795 => $expand!(-229), + 796 => $expand!(-228), + 797 => $expand!(-227), + 798 => $expand!(-226), + 799 => $expand!(-225), + 800 => $expand!(-224), + 801 => $expand!(-223), + 802 => $expand!(-222), + 803 => $expand!(-221), + 804 => $expand!(-220), + 805 => $expand!(-219), + 806 => $expand!(-218), + 807 => $expand!(-217), + 808 => $expand!(-216), + 809 => $expand!(-215), + 810 => $expand!(-214), + 811 => $expand!(-213), + 812 => $expand!(-212), + 813 => $expand!(-211), + 814 => $expand!(-210), + 815 => $expand!(-209), + 816 => $expand!(-208), + 817 => $expand!(-207), + 818 => $expand!(-206), + 819 => $expand!(-205), + 820 => $expand!(-204), + 821 => $expand!(-203), + 822 => $expand!(-202), + 823 => $expand!(-201), + 824 => $expand!(-200), + 825 => $expand!(-199), + 826 => $expand!(-198), + 827 => $expand!(-197), + 828 => $expand!(-196), + 829 => $expand!(-195), + 830 => $expand!(-194), + 831 => $expand!(-193), + 832 => $expand!(-192), + 833 => $expand!(-191), + 834 => $expand!(-190), + 835 => $expand!(-189), + 836 => $expand!(-188), + 837 => $expand!(-187), + 838 => $expand!(-186), + 839 => $expand!(-185), + 840 => $expand!(-184), + 841 => $expand!(-183), + 842 => $expand!(-182), + 843 => $expand!(-181), + 844 => $expand!(-180), + 845 => $expand!(-179), + 846 => $expand!(-178), + 847 => $expand!(-177), + 848 => $expand!(-176), + 849 => $expand!(-175), + 850 => $expand!(-174), + 851 => $expand!(-173), + 852 => $expand!(-172), + 853 => $expand!(-171), + 854 => $expand!(-170), + 855 => $expand!(-169), + 856 => $expand!(-168), + 857 => $expand!(-167), + 858 => $expand!(-166), + 859 => $expand!(-165), + 860 => $expand!(-164), + 861 => $expand!(-163), + 862 => $expand!(-162), + 863 => $expand!(-161), + 864 => $expand!(-160), + 865 => $expand!(-159), + 866 => $expand!(-158), + 867 => $expand!(-157), + 868 => $expand!(-156), + 869 => $expand!(-155), + 870 => $expand!(-154), + 871 => $expand!(-153), + 872 => $expand!(-152), + 873 => $expand!(-151), + 874 => $expand!(-150), + 875 => $expand!(-149), + 876 => $expand!(-148), + 877 => 
$expand!(-147), + 878 => $expand!(-146), + 879 => $expand!(-145), + 880 => $expand!(-144), + 881 => $expand!(-143), + 882 => $expand!(-142), + 883 => $expand!(-141), + 884 => $expand!(-140), + 885 => $expand!(-139), + 886 => $expand!(-138), + 887 => $expand!(-137), + 888 => $expand!(-136), + 889 => $expand!(-135), + 890 => $expand!(-134), + 891 => $expand!(-133), + 892 => $expand!(-132), + 893 => $expand!(-131), + 894 => $expand!(-130), + 895 => $expand!(-129), + 896 => $expand!(-128), + 897 => $expand!(-127), + 898 => $expand!(-126), + 899 => $expand!(-125), + 900 => $expand!(-124), + 901 => $expand!(-123), + 902 => $expand!(-122), + 903 => $expand!(-121), + 904 => $expand!(-120), + 905 => $expand!(-119), + 906 => $expand!(-118), + 907 => $expand!(-117), + 908 => $expand!(-116), + 909 => $expand!(-115), + 910 => $expand!(-114), + 911 => $expand!(-113), + 912 => $expand!(-112), + 913 => $expand!(-111), + 914 => $expand!(-110), + 915 => $expand!(-109), + 916 => $expand!(-108), + 917 => $expand!(-107), + 918 => $expand!(-106), + 919 => $expand!(-105), + 920 => $expand!(-104), + 921 => $expand!(-103), + 922 => $expand!(-102), + 923 => $expand!(-101), + 924 => $expand!(-100), + 925 => $expand!(-99), + 926 => $expand!(-98), + 927 => $expand!(-97), + 928 => $expand!(-96), + 929 => $expand!(-95), + 930 => $expand!(-94), + 931 => $expand!(-93), + 932 => $expand!(-92), + 933 => $expand!(-91), + 934 => $expand!(-90), + 935 => $expand!(-89), + 936 => $expand!(-88), + 937 => $expand!(-87), + 938 => $expand!(-86), + 939 => $expand!(-85), + 940 => $expand!(-84), + 941 => $expand!(-83), + 942 => $expand!(-82), + 943 => $expand!(-81), + 944 => $expand!(-80), + 945 => $expand!(-79), + 946 => $expand!(-78), + 947 => $expand!(-77), + 948 => $expand!(-76), + 949 => $expand!(-75), + 950 => $expand!(-74), + 951 => $expand!(-73), + 952 => $expand!(-72), + 953 => $expand!(-71), + 954 => $expand!(-70), + 955 => $expand!(-69), + 956 => $expand!(-68), + 957 => $expand!(-67), + 958 => $expand!(-66), + 959 => $expand!(-65), + 960 => $expand!(-64), + 961 => $expand!(-63), + 962 => $expand!(-62), + 963 => $expand!(-61), + 964 => $expand!(-60), + 965 => $expand!(-59), + 966 => $expand!(-58), + 967 => $expand!(-57), + 968 => $expand!(-56), + 969 => $expand!(-55), + 970 => $expand!(-54), + 971 => $expand!(-53), + 972 => $expand!(-52), + 973 => $expand!(-51), + 974 => $expand!(-50), + 975 => $expand!(-49), + 976 => $expand!(-48), + 977 => $expand!(-47), + 978 => $expand!(-46), + 979 => $expand!(-45), + 980 => $expand!(-44), + 981 => $expand!(-43), + 982 => $expand!(-42), + 983 => $expand!(-41), + 984 => $expand!(-40), + 985 => $expand!(-39), + 986 => $expand!(-38), + 987 => $expand!(-37), + 988 => $expand!(-36), + 989 => $expand!(-35), + 990 => $expand!(-34), + 991 => $expand!(-33), + 992 => $expand!(-32), + 993 => $expand!(-31), + 994 => $expand!(-30), + 995 => $expand!(-29), + 996 => $expand!(-28), + 997 => $expand!(-27), + 998 => $expand!(-26), + 999 => $expand!(-25), + 1000 => $expand!(-24), + 1001 => $expand!(-23), + 1002 => $expand!(-22), + 1003 => $expand!(-21), + 1004 => $expand!(-20), + 1005 => $expand!(-19), + 1006 => $expand!(-18), + 1007 => $expand!(-17), + 1008 => $expand!(-16), + 1009 => $expand!(-15), + 1010 => $expand!(-14), + 1011 => $expand!(-13), + 1012 => $expand!(-12), + 1013 => $expand!(-11), + 1014 => $expand!(-10), + 1015 => $expand!(-9), + 1016 => $expand!(-8), + 1017 => $expand!(-7), + 1018 => $expand!(-6), + 1019 => $expand!(-5), + 1020 => $expand!(-4), + 1021 => $expand!(-3), + 1022 => 
$expand!(-2), + 1023 => $expand!(-1), + _ => $expand!(511) + } + }; +} + + +macro_rules! constify_imm6 { + ($imm8:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm8) & 0b11_1111 { + 0 => $expand!(0), + 1 => $expand!(1), + 2 => $expand!(2), + 3 => $expand!(3), + 4 => $expand!(4), + 5 => $expand!(5), + 6 => $expand!(6), + 7 => $expand!(7), + 8 => $expand!(8), + 9 => $expand!(9), + 10 => $expand!(10), + 11 => $expand!(11), + 12 => $expand!(12), + 13 => $expand!(13), + 14 => $expand!(14), + 15 => $expand!(15), + 16 => $expand!(16), + 17 => $expand!(17), + 18 => $expand!(18), + 19 => $expand!(19), + 20 => $expand!(20), + 21 => $expand!(21), + 22 => $expand!(22), + 23 => $expand!(23), + 24 => $expand!(24), + 25 => $expand!(25), + 26 => $expand!(26), + 27 => $expand!(27), + 28 => $expand!(28), + 29 => $expand!(29), + 30 => $expand!(30), + 31 => $expand!(31), + 32 => $expand!(32), + 33 => $expand!(33), + 34 => $expand!(34), + 35 => $expand!(35), + 36 => $expand!(36), + 37 => $expand!(37), + 38 => $expand!(38), + 39 => $expand!(39), + 40 => $expand!(40), + 41 => $expand!(41), + 42 => $expand!(42), + 43 => $expand!(43), + 44 => $expand!(44), + 45 => $expand!(45), + 46 => $expand!(46), + 47 => $expand!(47), + 48 => $expand!(48), + 49 => $expand!(49), + 50 => $expand!(50), + 51 => $expand!(51), + 52 => $expand!(52), + 53 => $expand!(53), + 54 => $expand!(54), + 55 => $expand!(55), + 56 => $expand!(56), + 57 => $expand!(57), + 58 => $expand!(58), + 59 => $expand!(59), + 60 => $expand!(60), + 61 => $expand!(61), + 62 => $expand!(62), + _ => $expand!(63), + + } + }; +} + +macro_rules! constify_imm5 { + ($imm8:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm8) & 0b1_1111 { + 0 => $expand!(0), + 1 => $expand!(1), + 2 => $expand!(2), + 3 => $expand!(3), + 4 => $expand!(4), + 5 => $expand!(5), + 6 => $expand!(6), + 7 => $expand!(7), + 8 => $expand!(8), + 9 => $expand!(9), + 10 => $expand!(10), + 11 => $expand!(11), + 12 => $expand!(12), + 13 => $expand!(13), + 14 => $expand!(14), + 15 => $expand!(15), + 16 => $expand!(16), + 17 => $expand!(17), + 18 => $expand!(18), + 19 => $expand!(19), + 20 => $expand!(20), + 21 => $expand!(21), + 22 => $expand!(22), + 23 => $expand!(23), + 24 => $expand!(24), + 25 => $expand!(25), + 26 => $expand!(26), + 27 => $expand!(27), + 28 => $expand!(28), + 29 => $expand!(29), + 30 => $expand!(30), + _ => $expand!(31), + } + }; +} + +macro_rules! constify_imm_s5 { + ($imm8:expr, $expand:ident) => { + #[allow(overflowing_literals)] + match ($imm8) & 0b1_1111 { + 0 => $expand!(0), + 1 => $expand!(1), + 2 => $expand!(2), + 3 => $expand!(3), + 4 => $expand!(4), + 5 => $expand!(5), + 6 => $expand!(6), + 7 => $expand!(7), + 8 => $expand!(8), + 9 => $expand!(9), + 10 => $expand!(10), + 11 => $expand!(11), + 12 => $expand!(12), + 13 => $expand!(13), + 14 => $expand!(14), + 16 => $expand!(-16), + 17 => $expand!(-15), + 18 => $expand!(-14), + 19 => $expand!(-13), + 20 => $expand!(-12), + 21 => $expand!(-11), + 22 => $expand!(-10), + 23 => $expand!(-9), + 24 => $expand!(-8), + 25 => $expand!(-7), + 26 => $expand!(-6), + 27 => $expand!(-5), + 28 => $expand!(-4), + 29 => $expand!(-3), + 30 => $expand!(-2), + 31 => $expand!(-1), + _ => $expand!(15) + + } + }; +} + +macro_rules! 
constify_imm4 {
+    ($imm8:expr, $expand:ident) => {
+        #[allow(overflowing_literals)]
+        match ($imm8) & 0b1111 {
+            0 => $expand!(0),
+            1 => $expand!(1),
+            2 => $expand!(2),
+            3 => $expand!(3),
+            4 => $expand!(4),
+            5 => $expand!(5),
+            6 => $expand!(6),
+            7 => $expand!(7),
+            8 => $expand!(8),
+            9 => $expand!(9),
+            10 => $expand!(10),
+            11 => $expand!(11),
+            12 => $expand!(12),
+            13 => $expand!(13),
+            14 => $expand!(14),
+            _ => $expand!(15),
+        }
+    };
+}
+
+macro_rules! constify_imm3 {
+    ($imm8:expr, $expand:ident) => {
+        #[allow(overflowing_literals)]
+        match ($imm8) & 0b111 {
+            0 => $expand!(0),
+            1 => $expand!(1),
+            2 => $expand!(2),
+            3 => $expand!(3),
+            4 => $expand!(4),
+            5 => $expand!(5),
+            6 => $expand!(6),
+            _ => $expand!(7),
+        }
+    };
+}
+
+macro_rules! constify_imm2 {
+    ($imm8:expr, $expand:ident) => {
+        #[allow(overflowing_literals)]
+        match ($imm8) & 0b11 {
+            0 => $expand!(0),
+            1 => $expand!(1),
+            2 => $expand!(2),
+            _ => $expand!(3),
+        }
+    };
+}
+
+macro_rules! constify_imm1 {
+    ($imm8:expr, $expand:ident) => {
+        #[allow(overflowing_literals)]
+        match ($imm8) & 0b1 {
+            0 => $expand!(0),
+            _ => $expand!(1)
+        }
+    };
+}
+
diff --git a/crates/simd-test-macro/src/lib.rs b/crates/simd-test-macro/src/lib.rs
index 4b0abc6e23..272da43f2d 100644
--- a/crates/simd-test-macro/src/lib.rs
+++ b/crates/simd-test-macro/src/lib.rs
@@ -67,7 +67,7 @@ pub fn simd_test(
         "aarch64" => "is_aarch64_feature_detected",
         "powerpc" | "powerpcle" => "is_powerpc_feature_detected",
         "powerpc64" | "powerpc64le" => "is_powerpc64_feature_detected",
-        "mips" | "mipsel" => {
+        "mips" | "mipsel" | "mipsisa32r6" | "mipsisa32r6el" => {
            // FIXME:
            // On MIPS CI run-time feature detection always returns false due
            // to this qemu bug: https://bugs.launchpad.net/qemu/+bug/1754372
@@ -77,7 +77,7 @@ pub fn simd_test(
            force_test = true;
            "is_mips_feature_detected"
        }
-        "mips64" | "mips64el" => {
+        "mips64" | "mips64el" | "mipsisa64r6" | "mipsisa64r6el" => {
            // FIXME: see above
            force_test = true;
            "is_mips64_feature_detected"

From 33d75e2b95e8ee6f4eb2cbfd5d26fe8c7a813b8f Mon Sep 17 00:00:00 2001
From: Radovan Birdic
Date: Fri, 5 Apr 2019 06:02:44 +0000
Subject: [PATCH 2/3] Added vector types and fixed formatting.

---
 crates/core_arch/src/mips/msa.rs        | 8997 ++++++++++++++---------
 crates/core_arch/src/mips/msa/macros.rs |  108 +-
 2 files changed, 5445 insertions(+), 3660 deletions(-)

diff --git a/crates/core_arch/src/mips/msa.rs b/crates/core_arch/src/mips/msa.rs
index ad833fa265..5a271852b5 100644
--- a/crates/core_arch/src/mips/msa.rs
+++ b/crates/core_arch/src/mips/msa.rs
@@ -7,1078 +7,1130 @@
 #[cfg(test)]
 use stdsimd_test::assert_instr;
-use core_arch::simd::*;
-
 #[macro_use]
 mod macros;
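// Illustrative sketch (not part of the patch): the `types!` block added
// just below declares each MSA vector type as a plain 128-bit tuple struct
// with one field per lane, so, within this module, values can be built
// positionally. `sample_vectors` is a hypothetical helper, shown only to
// make the lane layout concrete:
fn sample_vectors() -> (v16u8, v4f32) {
    let ones = v16u8(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
    let mixed = v4f32(1.0, -2.0, 3.5, -4.5);
    (ones, mixed)
}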
+types! {
+    /// MIPS-specific 128-bit wide vector of 16 packed `i8`.
+    pub struct v16i8(
+        i8, i8, i8, i8, i8, i8, i8, i8,
+        i8, i8, i8, i8, i8, i8, i8, i8,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 8 packed `i16`.
+    pub struct v8i16(
+        i16, i16, i16, i16, i16, i16, i16, i16,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 4 packed `i32`.
+    pub struct v4i32(
+        i32, i32, i32, i32,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 2 packed `i64`.
+    pub struct v2i64(
+        i64, i64,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 16 packed `u8`.
+    pub struct v16u8(
+        u8, u8, u8, u8, u8, u8, u8, u8,
+        u8, u8, u8, u8, u8, u8, u8, u8,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 8 packed `u16`.
+    pub struct v8u16(
+        u16, u16, u16, u16, u16, u16, u16, u16,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 4 packed `u32`.
+    pub struct v4u32(
+        u32, u32, u32, u32,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 2 packed `u64`.
+    pub struct v2u64(
+        u64, u64,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 4 packed `f32`.
+    pub struct v4f32(
+        f32, f32, f32, f32,
+    );
+
+    /// MIPS-specific 128-bit wide vector of 2 packed `f64`.
+    pub struct v2f64(
+        f64, f64,
+    );
+}
+
 #[allow(improper_ctypes)]
 extern "C" {
     #[link_name = "llvm.mips.add.a.b"]
-    fn msa_add_a_b(a: i8x16, b: i8x16) -> i8x16;
+    fn msa_add_a_b(a: v16i8, b: v16i8) -> v16i8;
     #[link_name = "llvm.mips.add.a.h"]
-    fn msa_add_a_h(a: i16x8, b: i16x8) -> i16x8;
+    fn msa_add_a_h(a: v8i16, b: v8i16) -> v8i16;
     #[link_name = "llvm.mips.add.a.w"]
-    fn msa_add_a_w(a: i32x4, b: i32x4) -> i32x4;
+    fn msa_add_a_w(a: v4i32, b: v4i32) -> v4i32;
     #[link_name = "llvm.mips.add.a.d"]
-    fn msa_add_a_d(a: i64x2, b: i64x2) -> i64x2;
+    fn msa_add_a_d(a: v2i64, b: v2i64) -> v2i64;
     #[link_name = "llvm.mips.adds.a.b"]
-    fn msa_adds_a_b(a: i8x16, b: i8x16) -> i8x16;
+    fn msa_adds_a_b(a: v16i8, b: v16i8) -> v16i8;
     #[link_name = "llvm.mips.adds.a.h"]
-    fn msa_adds_a_h(a: i16x8, b: i16x8) -> i16x8;
+    fn msa_adds_a_h(a: v8i16, b: v8i16) -> v8i16;
     #[link_name = "llvm.mips.adds.a.w"]
-    fn msa_adds_a_w(a: i32x4, b: i32x4) -> i32x4;
+    fn msa_adds_a_w(a: v4i32, b: v4i32) -> v4i32;
     #[link_name = "llvm.mips.adds.a.d"]
-    fn msa_adds_a_d(a: i64x2, b: i64x2) -> i64x2;
+    fn msa_adds_a_d(a: v2i64, b: v2i64) -> v2i64;
     #[link_name = "llvm.mips.adds.s.b"]
-    fn msa_adds_s_b(a: i8x16, b: i8x16) -> i8x16;
+    fn msa_adds_s_b(a: v16i8, b: v16i8) -> v16i8;
     #[link_name = "llvm.mips.adds.s.h"]
-    fn msa_adds_s_h(a: i16x8, b: i16x8) -> i16x8;
+    fn msa_adds_s_h(a: v8i16, b: v8i16) -> v8i16;
     #[link_name = "llvm.mips.adds.s.w"]
-    fn msa_adds_s_w(a: i32x4, b: i32x4) -> i32x4;
+    fn msa_adds_s_w(a: v4i32, b: v4i32) -> v4i32;
     #[link_name = "llvm.mips.adds.s.d"]
-    fn msa_adds_s_d(a: i64x2, b: i64x2) -> i64x2;
+    fn msa_adds_s_d(a: v2i64, b: v2i64) -> v2i64;
     #[link_name = "llvm.mips.adds.u.b"]
-    fn msa_adds_u_b(a: u8x16, b: u8x16) -> u8x16;
+    fn msa_adds_u_b(a: v16u8, b: v16u8) -> v16u8;
     #[link_name = "llvm.mips.adds.u.h"]
-    fn msa_adds_u_h(a: u16x8, b: u16x8) -> u16x8;
+    fn msa_adds_u_h(a: v8u16, b: v8u16) -> v8u16;
     #[link_name = "llvm.mips.adds.u.w"]
-    fn msa_adds_u_w(a: u32x4, b: u32x4) -> u32x4;
+    fn msa_adds_u_w(a: v4u32, b: v4u32) -> v4u32;
     #[link_name = "llvm.mips.adds.u.d"]
-    fn msa_adds_u_d(a: u64x2, b: u64x2) -> u64x2;
+    fn msa_adds_u_d(a: v2u64, b: v2u64) -> v2u64;
     #[link_name = "llvm.mips.addv.b"]
-    fn msa_addv_b(a: i8x16, b: i8x16) -> i8x16;
+    fn msa_addv_b(a: v16i8, b: v16i8) -> v16i8;
     #[link_name = "llvm.mips.addv.h"]
-    fn msa_addv_h(a: i16x8, b: i16x8) -> i16x8;
+    fn msa_addv_h(a: v8i16, b: v8i16) -> v8i16;
     #[link_name = "llvm.mips.addv.w"]
-    fn msa_addv_w(a: i32x4, b: i32x4) -> i32x4;
+    fn msa_addv_w(a: v4i32, b: v4i32) -> v4i32;
     #[link_name = "llvm.mips.addv.d"]
-    fn msa_addv_d(a: i64x2, b: i64x2) -> i64x2;
+    fn msa_addv_d(a: v2i64, b: v2i64) -> v2i64;
     #[link_name = "llvm.mips.addvi.b"]
-    fn msa_addvi_b(a: i8x16, b: u32) -> i8x16;
+    fn msa_addvi_b(a: v16i8, b: u32) -> v16i8;
     #[link_name = "llvm.mips.addvi.h"]
-    fn msa_addvi_h(a: i16x8, b: u32) -> i16x8;
+    fn msa_addvi_h(a: v8i16, b: u32) -> v8i16;
     #[link_name = "llvm.mips.addvi.w"]
-    fn msa_addvi_w(a: i32x4, b: u32) -> i32x4;
+    fn msa_addvi_w(a: v4i32, b: u32) -> v4i32;
     #[link_name = "llvm.mips.addvi.d"]
-    fn msa_addvi_d(a: i64x2, b: u32) -> i64x2;
+    fn msa_addvi_d(a: v2i64, b: u32) -> v2i64;
     #[link_name = "llvm.mips.and.v"]
-    fn msa_and_v(a:
u8x16, b: u8x16) -> u8x16; + fn msa_and_v(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.andi.b"] - fn msa_andi_b(a: u8x16, b: u32) -> u8x16; + fn msa_andi_b(a: v16u8, b: u32) -> v16u8; #[link_name = "llvm.mips.asub.s.b"] - fn msa_asub_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_asub_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.asub.s.h"] - fn msa_asub_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_asub_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.asub.s.w"] - fn msa_asub_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_asub_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.asub.s.d"] - fn msa_asub_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_asub_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.asub.u.b"] - fn msa_asub_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_asub_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.asub.u.h"] - fn msa_asub_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_asub_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.asub.u.w"] - fn msa_asub_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_asub_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.asub.u.d"] - fn msa_asub_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_asub_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.ave.s.b"] - fn msa_ave_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_ave_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.ave.s.h"] - fn msa_ave_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_ave_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.ave.s.w"] - fn msa_ave_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_ave_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.ave.s.d"] - fn msa_ave_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_ave_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.ave.u.b"] - fn msa_ave_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_ave_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.ave.u.h"] - fn msa_ave_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_ave_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.ave.u.w"] - fn msa_ave_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_ave_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.ave.u.d"] - fn msa_ave_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_ave_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.aver.s.b"] - fn msa_aver_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_aver_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.aver.s.h"] - fn msa_aver_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_aver_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.aver.s.w"] - fn msa_aver_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_aver_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.aver.s.d"] - fn msa_aver_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_aver_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.aver.s.b"] - fn msa_aver_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_aver_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.aver.s.h"] - fn msa_aver_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_aver_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.aver.s.w"] - fn msa_aver_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_aver_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.aver.s.d"] - fn msa_aver_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_aver_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.bclr.b"] - fn msa_bclr_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_bclr_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.bclr.h"] - fn msa_bclr_h(a: u16x8, b: u16x8) -> u16x8; + fn 
msa_bclr_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.bclr.w"] - fn msa_bclr_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_bclr_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.bclr.d"] - fn msa_bclr_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_bclr_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.bclri.b"] - fn msa_bclri_b(a: u8x16, b: i32) -> u8x16; //imm0_7 + fn msa_bclri_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.bclri.h"] - fn msa_bclri_h(a: u16x8, b: i32) -> u16x8; //imm0_15 + fn msa_bclri_h(a: v8u16, b: i32) -> v8u16; #[link_name = "llvm.mips.bclri.w"] - fn msa_bclri_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + fn msa_bclri_w(a: v4u32, b: i32) -> v4u32; #[link_name = "llvm.mips.bclri.d"] - fn msa_bclri_d(a: u64x2, b: i32) -> u64x2; //imm0_63 + fn msa_bclri_d(a: v2u64, b: i32) -> v2u64; #[link_name = "llvm.mips.binsl.b"] - fn msa_binsl_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + fn msa_binsl_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8; #[link_name = "llvm.mips.binsl.h"] - fn msa_binsl_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8; + fn msa_binsl_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16; #[link_name = "llvm.mips.binsl.w"] - fn msa_binsl_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4; + fn msa_binsl_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32; #[link_name = "llvm.mips.binsl.d"] - fn msa_binsl_d(a: u64x2, b: u64x2, c: u64x2) -> u64x2; + fn msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64; #[link_name = "llvm.mips.binsli.b"] - fn msa_binsli_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + fn msa_binsli_b(a: v16u8, b: v16u8, c: i32) -> v16u8; #[link_name = "llvm.mips.binsli.h"] - fn msa_binsli_h(a: u16x8, b: u16x8, c: i32) -> u16x8; + fn msa_binsli_h(a: v8u16, b: v8u16, c: i32) -> v8u16; #[link_name = "llvm.mips.binsli.w"] - fn msa_binsli_w(a: u32x4, b: u32x4, c: i32) -> u32x4; + fn msa_binsli_w(a: v4u32, b: v4u32, c: i32) -> v4u32; #[link_name = "llvm.mips.binsli.d"] - fn msa_binsli_d(a: u64x2, b: u64x2, c: i32) -> u64x2; + fn msa_binsli_d(a: v2u64, b: v2u64, c: i32) -> v2u64; #[link_name = "llvm.mips.binsr.b"] - fn msa_binsr_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + fn msa_binsr_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8; #[link_name = "llvm.mips.binsr.h"] - fn msa_binsr_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8; + fn msa_binsr_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16; #[link_name = "llvm.mips.binsr.w"] - fn msa_binsr_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4; + fn msa_binsr_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32; #[link_name = "llvm.mips.binsr.d"] - fn msa_binsr_d(a: u64x2, b: u64x2, c: u64x2) -> u64x2; + fn msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64; #[link_name = "llvm.mips.binsri.b"] - fn msa_binsri_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + fn msa_binsri_b(a: v16u8, b: v16u8, c: i32) -> v16u8; #[link_name = "llvm.mips.binsri.h"] - fn msa_binsri_h(a: u16x8, b: u16x8, c: i32) -> u16x8; + fn msa_binsri_h(a: v8u16, b: v8u16, c: i32) -> v8u16; #[link_name = "llvm.mips.binsri.w"] - fn msa_binsri_w(a: u32x4, b: u32x4, c: i32) -> u32x4; + fn msa_binsri_w(a: v4u32, b: v4u32, c: i32) -> v4u32; #[link_name = "llvm.mips.binsri.d"] - fn msa_binsri_d(a: u64x2, b: u64x2, c: i32) -> u64x2; + fn msa_binsri_d(a: v2u64, b: v2u64, c: i32) -> v2u64; #[link_name = "llvm.mips.bmnz.v"] - fn msa_bmnz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + fn msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8; #[link_name = "llvm.mips.bmnzi.b"] - fn msa_bmnzi_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + fn msa_bmnzi_b(a: v16u8, b: v16u8, c: i32) -> v16u8; #[link_name = "llvm.mips.bmz.v"] - fn msa_bmz_v(a: 
u8x16, b: u8x16, c: u8x16) -> u8x16; + fn msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8; #[link_name = "llvm.mips.bmzi.b"] - fn msa_bmzi_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + fn msa_bmzi_b(a: v16u8, b: v16u8, c: i32) -> v16u8; #[link_name = "llvm.mips.bneg.b"] - fn msa_bneg_b(a:u8x16, b:u8x16) -> u8x16; + fn msa_bneg_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.bneg.h"] - fn msa_bneg_h(a:u16x8, b:u16x8) -> u16x8; + fn msa_bneg_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.bneg.w"] - fn msa_bneg_w(a:u32x4, b:u32x4) -> u32x4; + fn msa_bneg_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.bneg.d"] - fn msa_bneg_d(a:u64x2, b:u64x2) -> u64x2; + fn msa_bneg_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.bnegi.b"] - fn msa_bnegi_b(a: u8x16, b:i32) -> u8x16; + fn msa_bnegi_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.bnegi.h"] - fn msa_bnegi_h(a: u16x8, b:i32) -> u16x8; + fn msa_bnegi_h(a: v8u16, b: i32) -> v8u16; #[link_name = "llvm.mips.bnegi.w"] - fn msa_bnegi_w(a: u32x4, b:i32) -> u32x4; + fn msa_bnegi_w(a: v4u32, b: i32) -> v4u32; #[link_name = "llvm.mips.bnegi.d"] - fn msa_bnegi_d(a: u64x2, b:i32) -> u64x2; + fn msa_bnegi_d(a: v2u64, b: i32) -> v2u64; #[link_name = "llvm.mips.bnz.b"] - fn msa_bnz_b(a: u8x16) -> i32; + fn msa_bnz_b(a: v16u8) -> i32; #[link_name = "llvm.mips.bnz.h"] - fn msa_bnz_h(a: u16x8) -> i32; + fn msa_bnz_h(a: v8u16) -> i32; #[link_name = "llvm.mips.bnz.w"] - fn msa_bnz_w(a: u32x4) -> i32; + fn msa_bnz_w(a: v4u32) -> i32; #[link_name = "llvm.mips.bnz.d"] - fn msa_bnz_d(a: u64x2) -> i32; + fn msa_bnz_d(a: v2u64) -> i32; #[link_name = "llvm.mips.bnz.v"] - fn msa_bnz_v(a: u8x16) -> i32; + fn msa_bnz_v(a: v16u8) -> i32; #[link_name = "llvm.mips.bsel.v"] - fn msa_bsel_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16; + fn msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8; #[link_name = "llvm.mips.bseli.b"] - fn msa_bseli_b(a: u8x16, b: u8x16, c: i32) -> u8x16; + fn msa_bseli_b(a: v16u8, b: v16u8, c: i32) -> v16u8; #[link_name = "llvm.mips.bset.b"] - fn msa_bset_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_bset_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.bset.h"] - fn msa_bset_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_bset_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.bset.w"] - fn msa_bset_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_bset_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.bset.d"] - fn msa_bset_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_bset_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.bseti.b"] - fn msa_bseti_b(a: u8x16, b: i32) -> u8x16; + fn msa_bseti_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.bseti.h"] - fn msa_bseti_h(a: u16x8, b: i32) -> u16x8; + fn msa_bseti_h(a: v8u16, b: i32) -> v8u16; #[link_name = "llvm.mips.bseti.w"] - fn msa_bseti_w(a: u32x4, b: i32) -> u32x4; + fn msa_bseti_w(a: v4u32, b: i32) -> v4u32; #[link_name = "llvm.mips.bseti.d"] - fn msa_bseti_d(a: u64x2, b: i32) -> u64x2; + fn msa_bseti_d(a: v2u64, b: i32) -> v2u64; #[link_name = "llvm.mips.bz.b"] - fn msa_bz_b(a: u8x16) -> i32; + fn msa_bz_b(a: v16u8) -> i32; #[link_name = "llvm.mips.bz.h"] - fn msa_bz_h(a: u16x8) -> i32; + fn msa_bz_h(a: v8u16) -> i32; #[link_name = "llvm.mips.bz.w"] - fn msa_bz_w(a: u32x4) -> i32; + fn msa_bz_w(a: v4u32) -> i32; #[link_name = "llvm.mips.bz.d"] - fn msa_bz_d(a: u64x2) -> i32; + fn msa_bz_d(a: v2u64) -> i32; #[link_name = "llvm.mips.bz.v"] - fn msa_bz_v(a: u8x16) -> i32; + fn msa_bz_v(a: v16u8) -> i32; #[link_name = "llvm.mips.ceq.b"] - fn msa_ceq_b(a: i8x16, 
b: i8x16) -> i8x16; + fn msa_ceq_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.ceq.h"] - fn msa_ceq_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_ceq_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.ceq.w"] - fn msa_ceq_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_ceq_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.ceq.d"] - fn msa_ceq_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_ceq_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.ceqi.b"] - fn msa_ceqi_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + fn msa_ceqi_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.ceqi.h"] - fn msa_ceqi_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + fn msa_ceqi_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.ceqi.w"] - fn msa_ceqi_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + fn msa_ceqi_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.ceqi.d"] - fn msa_ceqi_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + fn msa_ceqi_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.cfcmsa"] fn msa_cfcmsa(a: i32) -> i32; #[link_name = "llvm.mips.cle.s.b"] - fn msa_cle_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_cle_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.cle.s.h"] - fn msa_cle_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_cle_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.cle.s.w"] - fn msa_cle_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_cle_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.cle.s.d"] - fn msa_cle_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_cle_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.cle.u.b"] - fn msa_cle_u_b(a: u8x16, b: u8x16) -> i8x16; + fn msa_cle_u_b(a: v16u8, b: v16u8) -> v16i8; #[link_name = "llvm.mips.cle.u.h"] - fn msa_cle_u_h(a: u16x8, b: u16x8) -> i16x8; + fn msa_cle_u_h(a: v8u16, b: v8u16) -> v8i16; #[link_name = "llvm.mips.cle.u.w"] - fn msa_cle_u_w(a: u32x4, b: u32x4) -> i32x4; + fn msa_cle_u_w(a: v4u32, b: v4u32) -> v4i32; #[link_name = "llvm.mips.cle.u.d"] - fn msa_cle_u_d(a: u64x2, b: u64x2) -> i64x2; + fn msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64; #[link_name = "llvm.mips.clei.s.b"] - fn msa_clei_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + fn msa_clei_s_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.clei.s.h"] - fn msa_clei_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + fn msa_clei_s_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.clei.s.w"] - fn msa_clei_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + fn msa_clei_s_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.clei.s.d"] - fn msa_clei_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + fn msa_clei_s_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.clei.u.b"] - fn msa_clei_u_b(a: u8x16, b: i32) -> i8x16; //imm0_31 + fn msa_clei_u_b(a: v16u8, b: i32) -> v16i8; #[link_name = "llvm.mips.clei.u.h"] - fn msa_clei_u_h(a: u16x8, b: i32) -> i16x8; //imm0_31 + fn msa_clei_u_h(a: v8u16, b: i32) -> v8i16; #[link_name = "llvm.mips.clei.u.w"] - fn msa_clei_u_w(a: u32x4, b: i32) -> i32x4; //imm0_31 + fn msa_clei_u_w(a: v4u32, b: i32) -> v4i32; #[link_name = "llvm.mips.clei.u.d"] - fn msa_clei_u_d(a: u64x2, b: i32) -> i64x2; //imm0_31 + fn msa_clei_u_d(a: v2u64, b: i32) -> v2i64; #[link_name = "llvm.mips.clt.s.b"] - fn msa_clt_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_clt_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.clt.s.h"] - fn msa_clt_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_clt_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.clt.s.w"] - fn msa_clt_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_clt_s_w(a: v4i32, 
b: v4i32) -> v4i32; #[link_name = "llvm.mips.clt.s.d"] - fn msa_clt_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_clt_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.clt.u.b"] - fn msa_clt_u_b(a: u8x16, b: u8x16) -> i8x16; + fn msa_clt_u_b(a: v16u8, b: v16u8) -> v16i8; #[link_name = "llvm.mips.clt.u.h"] - fn msa_clt_u_h(a: u16x8, b: u16x8) -> i16x8; + fn msa_clt_u_h(a: v8u16, b: v8u16) -> v8i16; #[link_name = "llvm.mips.clt.u.w"] - fn msa_clt_u_w(a: u32x4, b: u32x4) -> i32x4; + fn msa_clt_u_w(a: v4u32, b: v4u32) -> v4i32; #[link_name = "llvm.mips.clt.u.d"] - fn msa_clt_u_d(a: u64x2, b: u64x2) -> i64x2; + fn msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64; #[link_name = "llvm.mips.clti.s.b"] - fn msa_clti_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + fn msa_clti_s_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.clti.s.h"] - fn msa_clti_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + fn msa_clti_s_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.clti.s.w"] - fn msa_clti_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + fn msa_clti_s_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.clti.s.d"] - fn msa_clti_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + fn msa_clti_s_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.clti.u.b"] - fn msa_clti_u_b(a: u8x16, b: i32) -> i8x16; + fn msa_clti_u_b(a: v16u8, b: i32) -> v16i8; #[link_name = "llvm.mips.clti.u.h"] - fn msa_clti_u_h(a: u16x8, b: i32) -> i16x8; + fn msa_clti_u_h(a: v8u16, b: i32) -> v8i16; #[link_name = "llvm.mips.clti.u.w"] - fn msa_clti_u_w(a: u32x4, b: i32) -> i32x4; + fn msa_clti_u_w(a: v4u32, b: i32) -> v4i32; #[link_name = "llvm.mips.clti.u.d"] - fn msa_clti_u_d(a: u64x2, b: i32) -> i64x2; + fn msa_clti_u_d(a: v2u64, b: i32) -> v2i64; #[link_name = "llvm.mips.copy.s.b"] - fn msa_copy_s_b(a: i8x16, b: i32) -> i32; //imm0_15 + fn msa_copy_s_b(a: v16i8, b: i32) -> i32; #[link_name = "llvm.mips.copy.s.h"] - fn msa_copy_s_h(a: i16x8, b: i32) -> i32; //imm0_7 + fn msa_copy_s_h(a: v8i16, b: i32) -> i32; #[link_name = "llvm.mips.copy.s.w"] - fn msa_copy_s_w(a: i32x4, b: i32) -> i32; //imm0_3 + fn msa_copy_s_w(a: v4i32, b: i32) -> i32; #[link_name = "llvm.mips.copy.s.d"] - fn msa_copy_s_d(a: i64x2, b: i32) -> i64; //imm0_1 + fn msa_copy_s_d(a: v2i64, b: i32) -> i64; #[link_name = "llvm.mips.copy.u.b"] - fn msa_copy_u_b(a: i8x16, b: i32) -> u32; //imm0_15 + fn msa_copy_u_b(a: v16i8, b: i32) -> u32; #[link_name = "llvm.mips.copy.u.h"] - fn msa_copy_u_h(a: i16x8, b: i32) -> u32; //imm0_7 + fn msa_copy_u_h(a: v8i16, b: i32) -> u32; #[link_name = "llvm.mips.copy.u.w"] - fn msa_copy_u_w(a: i32x4, b: i32) -> u32; //imm0_3 + fn msa_copy_u_w(a: v4i32, b: i32) -> u32; #[link_name = "llvm.mips.copy.u.d"] - fn msa_copy_u_d(a: i64x2, b: i32) -> u64; //imm0_1 + fn msa_copy_u_d(a: v2i64, b: i32) -> u64; #[link_name = "llvm.mips.ctcmsa"] fn msa_ctcmsa(imm5: i32, a: i32) -> (); #[link_name = "llvm.mips.div.s.b"] - fn msa_div_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_div_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.div.s.h"] - fn msa_div_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_div_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.div.s.w"] - fn msa_div_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_div_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.div.s.d"] - fn msa_div_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_div_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.div.u.b"] - fn msa_div_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_div_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = 
"llvm.mips.div.u.h"] - fn msa_div_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_div_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.div.u.w"] - fn msa_div_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_div_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.div.u.d"] - fn msa_div_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_div_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.dotp.s.h"] - fn msa_dotp_s_h(a: i8x16, b : i8x16) -> i16x8; + fn msa_dotp_s_h(a: v16i8, b: v16i8) -> v8i16; #[link_name = "llvm.mips.dotp.s.w"] - fn msa_dotp_s_w(a: i16x8, b : i16x8) -> i32x4; + fn msa_dotp_s_w(a: v8i16, b: v8i16) -> v4i32; #[link_name = "llvm.mips.dotp.s.d"] - fn msa_dotp_s_d(a: i32x4, b : i32x4) -> i64x2; + fn msa_dotp_s_d(a: v4i32, b: v4i32) -> v2i64; #[link_name = "llvm.mips.dotp.u.h"] - fn msa_dotp_u_h(a: u8x16, b : u8x16) -> u16x8; + fn msa_dotp_u_h(a: v16u8, b: v16u8) -> v8u16; #[link_name = "llvm.mips.dotp.u.w"] - fn msa_dotp_u_w(a: u16x8, b : u16x8) -> u32x4; + fn msa_dotp_u_w(a: v8u16, b: v8u16) -> v4u32; #[link_name = "llvm.mips.dotp.u.d"] - fn msa_dotp_u_d(a: u32x4, b : u32x4) -> u64x2; + fn msa_dotp_u_d(a: v4u32, b: v4u32) -> v2u64; #[link_name = "llvm.mips.dpadd.s.h"] - fn msa_dpadd_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8; + fn msa_dpadd_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16; #[link_name = "llvm.mips.dpadd.s.w"] - fn msa_dpadd_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4; + fn msa_dpadd_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32; #[link_name = "llvm.mips.dpadd.s.d"] - fn msa_dpadd_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2; + fn msa_dpadd_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64; #[link_name = "llvm.mips.dpadd.s.h"] - fn msa_dpadd_u_h(a: u16x8, b: u8x16, c: u8x16) -> u16x8; + fn msa_dpadd_u_h(a: v8u16, b: v16u8, c: v16u8) -> v8u16; #[link_name = "llvm.mips.dpadd.u.w"] - fn msa_dpadd_u_w(a: u32x4, b: u16x8, c: u16x8) -> u32x4; + fn msa_dpadd_u_w(a: v4u32, b: v8u16, c: v8u16) -> v4u32; #[link_name = "llvm.mips.dpadd.u.d"] - fn msa_dpadd_u_d(a: u64x2, b: u32x4, c: u32x4) -> u64x2; + fn msa_dpadd_u_d(a: v2u64, b: v4u32, c: v4u32) -> v2u64; #[link_name = "llvm.mips.dpsub.s.h"] - fn msa_dpsub_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8; + fn msa_dpsub_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16; #[link_name = "llvm.mips.dpsub.s.w"] - fn msa_dpsub_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4; + fn msa_dpsub_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32; #[link_name = "llvm.mips.dpsub.s.d"] - fn msa_dpsub_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2; + fn msa_dpsub_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64; #[link_name = "llvm.mips.dpsub.u.h"] - fn msa_dpsub_u_h(a: i16x8, b: u8x16, c: u8x16) -> i16x8; + fn msa_dpsub_u_h(a: v8i16, b: v16u8, c: v16u8) -> v8i16; #[link_name = "llvm.mips.dpsub.u.w"] - fn msa_dpsub_u_w(a: i32x4, b: u16x8, c: u16x8) -> i32x4; + fn msa_dpsub_u_w(a: v4i32, b: v8u16, c: v8u16) -> v4i32; #[link_name = "llvm.mips.dpsub.u.d"] - fn msa_dpsub_u_d(a: i64x2, b: u32x4, c: u32x4) -> i64x2; + fn msa_dpsub_u_d(a: v2i64, b: v4u32, c: v4u32) -> v2i64; #[link_name = "llvm.mips.fadd.w"] - fn msa_fadd_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fadd_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fadd.d"] - fn msa_fadd_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fadd_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.fcaf.w"] - fn msa_fcaf_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcaf_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcaf.d"] - fn msa_fcaf_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcaf_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = 
"llvm.mips.fceq.w"] - fn msa_fceq_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fceq_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fceq.d"] - fn msa_fceq_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fceq_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fclass.w"] - fn msa_fclass_w(a: f32x4) -> i32x4; + fn msa_fclass_w(a: v4f32) -> v4i32; #[link_name = "llvm.mips.fclass.d"] - fn msa_fclass_d(a: f64x2) -> i64x2; + fn msa_fclass_d(a: v2f64) -> v2i64; #[link_name = "llvm.mips.fcle.w"] - fn msa_fcle_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcle_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcle.d"] - fn msa_fcle_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcle_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fclt.w"] - fn msa_fclt_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fclt_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fclt.d"] - fn msa_fclt_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fclt_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcne.w"] - fn msa_fcne_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcne_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcne.d"] - fn msa_fcne_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcne_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcor.w"] - fn msa_fcor_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcor_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcor.d"] - fn msa_fcor_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcor_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcueq.w"] - fn msa_fcueq_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcueq_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcueq.d"] - fn msa_fcueq_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcueq_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcule.w"] - fn msa_fcule_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcule_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcule.d"] - fn msa_fcule_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcule_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcult.w"] - fn msa_fcult_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcult_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcult.d"] - fn msa_fcult_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcult_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcun.w"] - fn msa_fcun_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcun_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcun.d"] - fn msa_fcun_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcun_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fcune.w"] - fn msa_fcune_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fcune_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fcune.d"] - fn msa_fcune_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fcune_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fdiv.w"] - fn msa_fdiv_w(a: f32x4, b:f32x4) -> f32x4; + fn msa_fdiv_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fdiv.d"] - fn msa_fdiv_d(a: f64x2, b:f64x2) -> f64x2; + fn msa_fdiv_d(a: v2f64, b: v2f64) -> v2f64; // FIXME: 16-bit floats // #[link_name = "llvm.mips.fexdo.h"] - // fn msa_fexdo_h(a: f32x4, b: f32x4) -> f16x8; + // fn msa_fexdo_h(a: v4f32, b: v4f32) -> f16x8; #[link_name = "llvm.mips.fexdo.w"] - fn msa_fexdo_w(a: f64x2, b: f64x2) -> f32x4; + fn msa_fexdo_w(a: v2f64, b: v2f64) -> v4f32; #[link_name = "llvm.mips.fexp2.w"] - fn msa_fexp2_w(a: f32x4, b: i32x4) -> f32x4; + fn msa_fexp2_w(a: v4f32, b: v4i32) -> v4f32; #[link_name = "llvm.mips.fexp2.d"] - fn msa_fexp2_d(a: f64x2, b: i64x2) -> f64x2; + fn msa_fexp2_d(a: v2f64, b: v2i64) -> v2f64; 
#[link_name = "llvm.mips.fexupl.w"] // FIXME: 16-bit floats - // fn msa_fexupl_w(a: f16x8) -> f32x4; + // fn msa_fexupl_w(a: f16x8) -> v4f32; #[link_name = "llvm.mips.fexupl.d"] - fn msa_fexupl_d(a: f32x4) -> f64x2; + fn msa_fexupl_d(a: v4f32) -> v2f64; // FIXME: 16-bit floats // #[link_name = "llvm.mips.fexupr.w"] - // fn msa_fexupr_w(a: f16x8) -> f32x4; + // fn msa_fexupr_w(a: f16x8) -> v4f32; #[link_name = "llvm.mips.fexupr.d"] - fn msa_fexupr_d(a: f32x4) -> f64x2; + fn msa_fexupr_d(a: v4f32) -> v2f64; #[link_name = "llvm.mips.ffint.s.w"] - fn msa_ffint_s_w(a: i32x4) -> f32x4; + fn msa_ffint_s_w(a: v4i32) -> v4f32; #[link_name = "llvm.mips.ffint.s.d"] - fn msa_ffint_s_d(a: i64x2) -> f64x2; + fn msa_ffint_s_d(a: v2i64) -> v2f64; #[link_name = "llvm.mips.ffint.u.w"] - fn msa_ffint_u_w(a: u32x4) -> f32x4; + fn msa_ffint_u_w(a: v4u32) -> v4f32; #[link_name = "llvm.mips.ffint.u.d"] - fn msa_ffint_u_d(a: u64x2) -> f64x2; + fn msa_ffint_u_d(a: v2u64) -> v2f64; #[link_name = "llvm.mips.ffql.w"] - fn msa_ffql_w(a: i16x8) -> f32x4; + fn msa_ffql_w(a: v8i16) -> v4f32; #[link_name = "llvm.mips.ffql.d"] - fn msa_ffql_d(a: i32x4) -> f64x2; + fn msa_ffql_d(a: v4i32) -> v2f64; #[link_name = "llvm.mips.ffqr.w"] - fn msa_ffqr_w(a: i16x8) -> f32x4; + fn msa_ffqr_w(a: v8i16) -> v4f32; #[link_name = "llvm.mips.ffqr.d"] - fn msa_ffqr_d(a: i32x4) -> f64x2; + fn msa_ffqr_d(a: v4i32) -> v2f64; #[link_name = "llvm.mips.fill.b"] - fn msa_fill_b(a: i32) -> i8x16; + fn msa_fill_b(a: i32) -> v16i8; #[link_name = "llvm.mips.fill.h"] - fn msa_fill_h(a: i32) -> i16x8; + fn msa_fill_h(a: i32) -> v8i16; #[link_name = "llvm.mips.fill.w"] - fn msa_fill_w(a: i32) -> i32x4; + fn msa_fill_w(a: i32) -> v4i32; #[link_name = "llvm.mips.fill.d"] - fn msa_fill_d(a: i64) -> i64x2; + fn msa_fill_d(a: i64) -> v2i64; #[link_name = "llvm.mips.flog2.w"] - fn msa_flog2_w(a: f32x4) -> f32x4; + fn msa_flog2_w(a: v4f32) -> v4f32; #[link_name = "llvm.mips.flog2.d"] - fn msa_flog2_d(a: f64x2) -> f64x2; + fn msa_flog2_d(a: v2f64) -> v2f64; #[link_name = "llvm.mips.fmadd.w"] - fn msa_fmadd_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4; + fn msa_fmadd_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32; #[link_name = "llvm.mips.fmadd.d"] - fn msa_fmadd_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2; + fn msa_fmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64; #[link_name = "llvm.mips.fmax.w"] - fn msa_fmax_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fmax_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fmax.d"] - fn msa_fmax_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fmax_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.fmax.a.w"] - fn msa_fmax_a_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fmax_a_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fmax.a.d"] - fn msa_fmax_a_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fmax_a_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.fmin.w"] - fn msa_fmin_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fmin_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fmin.d"] - fn msa_fmin_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fmin_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.fmin.a.w"] - fn msa_fmin_a_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fmin_a_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fmin.a.d"] - fn msa_fmin_a_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fmin_a_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.fmsub.w"] - fn msa_fmsub_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4; + fn msa_fmsub_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32; #[link_name = "llvm.mips.fmsub.d"] - fn msa_fmsub_d(a: 
f64x2, b: f64x2, c: f64x2) -> f64x2; + fn msa_fmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64; #[link_name = "llvm.mips.fmul.w"] - fn msa_fmul_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fmul_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fmul.d"] - fn msa_fmul_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fmul_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.frint.w"] - fn msa_frint_w(a: f32x4) -> f32x4; + fn msa_frint_w(a: v4f32) -> v4f32; #[link_name = "llvm.mips.frint.d"] - fn msa_frint_d(a: f64x2) -> f64x2; + fn msa_frint_d(a: v2f64) -> v2f64; #[link_name = "llvm.mips.frcp.w"] - fn msa_frcp_w(a: f32x4) -> f32x4; + fn msa_frcp_w(a: v4f32) -> v4f32; #[link_name = "llvm.mips.frcp.d"] - fn msa_frcp_d(a: f64x2) -> f64x2; + fn msa_frcp_d(a: v2f64) -> v2f64; #[link_name = "llvm.mips.frsqrt.w"] - fn msa_frsqrt_w(a: f32x4) -> f32x4; + fn msa_frsqrt_w(a: v4f32) -> v4f32; #[link_name = "llvm.mips.frsqrt.d"] - fn msa_frsqrt_d(a: f64x2) -> f64x2; + fn msa_frsqrt_d(a: v2f64) -> v2f64; #[link_name = "llvm.mips.fsaf.w"] - fn msa_fsaf_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsaf_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsaf.d"] - fn msa_fsaf_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsaf_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fseq.w"] - fn msa_fseq_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fseq_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fseq.d"] - fn msa_fseq_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fseq_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsle.w"] - fn msa_fsle_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsle_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsle.d"] - fn msa_fsle_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsle_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fslt.w"] - fn msa_fslt_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fslt_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fslt.d"] - fn msa_fslt_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fslt_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsne.w"] - fn msa_fsne_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsne_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsne.d"] - fn msa_fsne_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsne_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsor.w"] - fn msa_fsor_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsor_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsor.d"] - fn msa_fsor_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsor_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsqrt.w"] - fn msa_fsqrt_w(a: f32x4) -> f32x4; + fn msa_fsqrt_w(a: v4f32) -> v4f32; #[link_name = "llvm.mips.fsqrt.d"] - fn msa_fsqrt_d(a: f64x2) -> f64x2; + fn msa_fsqrt_d(a: v2f64) -> v2f64; #[link_name = "llvm.mips.fsub.w"] - fn msa_fsub_w(a: f32x4, b: f32x4) -> f32x4; + fn msa_fsub_w(a: v4f32, b: v4f32) -> v4f32; #[link_name = "llvm.mips.fsub.d"] - fn msa_fsub_d(a: f64x2, b: f64x2) -> f64x2; + fn msa_fsub_d(a: v2f64, b: v2f64) -> v2f64; #[link_name = "llvm.mips.fsueq.w"] - fn msa_fsueq_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsueq_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsueq.d"] - fn msa_fsueq_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsueq_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsule.w"] - fn msa_fsule_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsule_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsule.d"] - fn msa_fsule_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsule_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsult.w"] - fn msa_fsult_w(a: 
f32x4, b: f32x4) -> i32x4; + fn msa_fsult_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsult.d"] - fn msa_fsult_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsult_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsun.w"] - fn msa_fsun_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsun_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsun.d"] - fn msa_fsun_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsun_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.fsune.w"] - fn msa_fsune_w(a: f32x4, b: f32x4) -> i32x4; + fn msa_fsune_w(a: v4f32, b: v4f32) -> v4i32; #[link_name = "llvm.mips.fsune.d"] - fn msa_fsune_d(a: f64x2, b: f64x2) -> i64x2; + fn msa_fsune_d(a: v2f64, b: v2f64) -> v2i64; #[link_name = "llvm.mips.ftint.s.w"] - fn msa_ftint_s_w(a: f32x4) -> i32x4; + fn msa_ftint_s_w(a: v4f32) -> v4i32; #[link_name = "llvm.mips.ftint.s.d"] - fn msa_ftint_s_d(a: f64x2) -> i64x2; + fn msa_ftint_s_d(a: v2f64) -> v2i64; #[link_name = "llvm.mips.ftint.u.w"] - fn msa_ftint_u_w(a: f32x4) -> u32x4; + fn msa_ftint_u_w(a: v4f32) -> v4u32; #[link_name = "llvm.mips.ftint.u.d"] - fn msa_ftint_u_d(a: f64x2) -> u64x2; + fn msa_ftint_u_d(a: v2f64) -> v2u64; #[link_name = "llvm.mips.ftq.h"] - fn msa_ftq_h(a: f32x4, b: f32x4) -> i16x8; + fn msa_ftq_h(a: v4f32, b: v4f32) -> v8i16; #[link_name = "llvm.mips.ftq.w"] - fn msa_ftq_w(a: f64x2, b: f64x2) -> i32x4; + fn msa_ftq_w(a: v2f64, b: v2f64) -> v4i32; #[link_name = "llvm.mips.ftrunc.s.w"] - fn msa_ftrunc_s_w(a: f32x4) -> i32x4; + fn msa_ftrunc_s_w(a: v4f32) -> v4i32; #[link_name = "llvm.mips.ftrunc.s.d"] - fn msa_ftrunc_s_d(a: f64x2) -> i64x2; + fn msa_ftrunc_s_d(a: v2f64) -> v2i64; #[link_name = "llvm.mips.ftrunc.u.w"] - fn msa_ftrunc_u_w(a: f32x4) -> u32x4; + fn msa_ftrunc_u_w(a: v4f32) -> v4u32; #[link_name = "llvm.mips.ftrunc.u.d"] - fn msa_ftrunc_u_d(a: f64x2) -> u64x2; + fn msa_ftrunc_u_d(a: v2f64) -> v2u64; #[link_name = "llvm.mips.hadd.s.h"] - fn msa_hadd_s_h(a: i8x16, b: i8x16) -> i16x8; + fn msa_hadd_s_h(a: v16i8, b: v16i8) -> v8i16; #[link_name = "llvm.mips.hadd.s.w"] - fn msa_hadd_s_w(a: i16x8, b: i16x8) -> i32x4; + fn msa_hadd_s_w(a: v8i16, b: v8i16) -> v4i32; #[link_name = "llvm.mips.hadd.s.d"] - fn msa_hadd_s_d(a: i32x4, b: i32x4) -> i64x2; + fn msa_hadd_s_d(a: v4i32, b: v4i32) -> v2i64; #[link_name = "llvm.mips.hadd.u.h"] - fn msa_hadd_u_h(a: u8x16, b: u8x16) -> u16x8; + fn msa_hadd_u_h(a: v16u8, b: v16u8) -> v8u16; #[link_name = "llvm.mips.hadd.u.w"] - fn msa_hadd_u_w(a: u16x8, b: u16x8) -> u32x4; + fn msa_hadd_u_w(a: v8u16, b: v8u16) -> v4u32; #[link_name = "llvm.mips.hadd.u.d"] - fn msa_hadd_u_d(a: u32x4, b: u32x4) -> u64x2; + fn msa_hadd_u_d(a: v4u32, b: v4u32) -> v2u64; #[link_name = "llvm.mips.hsub.s.h"] - fn msa_hsub_s_h(a: i8x16, b: i8x16) -> i16x8; + fn msa_hsub_s_h(a: v16i8, b: v16i8) -> v8i16; #[link_name = "llvm.mips.hsub.s.w"] - fn msa_hsub_s_w(a: i16x8, b: i16x8) -> i32x4; + fn msa_hsub_s_w(a: v8i16, b: v8i16) -> v4i32; #[link_name = "llvm.mips.hsub.s.d"] - fn msa_hsub_s_d(a: i32x4, b: i32x4) -> i64x2; + fn msa_hsub_s_d(a: v4i32, b: v4i32) -> v2i64; #[link_name = "llvm.mips.hsub.u.h"] - fn msa_hsub_u_h(a: u8x16, b: u8x16) -> i16x8; + fn msa_hsub_u_h(a: v16u8, b: v16u8) -> v8i16; #[link_name = "llvm.mips.hsub.u.w"] - fn msa_hsub_u_w(a: u16x8, b: u16x8) -> i32x4; + fn msa_hsub_u_w(a: v8u16, b: v8u16) -> v4i32; #[link_name = "llvm.mips.hsub.u.d"] - fn msa_hsub_u_d(a: u32x4, b: u32x4) -> i64x2; + fn msa_hsub_u_d(a: v4u32, b: v4u32) -> v2i64; #[link_name = "llvm.mips.ilvev.b"] - fn msa_ilvev_b(a: i8x16, b: i8x16) -> 
i8x16; + fn msa_ilvev_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.ilvev.h"] - fn msa_ilvev_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_ilvev_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.ilvev.w"] - fn msa_ilvev_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_ilvev_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.ilvev.d"] - fn msa_ilvev_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_ilvev_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.ilvl.b"] - fn msa_ilvl_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_ilvl_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.ilvl.h"] - fn msa_ilvl_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_ilvl_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.ilvl.w"] - fn msa_ilvl_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_ilvl_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.ilvl.d"] - fn msa_ilvl_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_ilvl_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.ilvod.b"] - fn msa_ilvod_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_ilvod_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.ilvod.h"] - fn msa_ilvod_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_ilvod_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.ilvod.w"] - fn msa_ilvod_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_ilvod_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.ilvod.d"] - fn msa_ilvod_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_ilvod_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.ilvr.b"] - fn msa_ilvr_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_ilvr_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.ilvr.h"] - fn msa_ilvr_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_ilvr_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.ilvr.w"] - fn msa_ilvr_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_ilvr_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.ilvr.d"] - fn msa_ilvr_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.insert.b"] - fn msa_insert_b(a: i8x16, b: i32, c: i32) -> i8x16; //imm0_15 + fn msa_insert_b(a: v16i8, b: i32, c: i32) -> v16i8; #[link_name = "llvm.mips.insert.h"] - fn msa_insert_h(a: i16x8, b: i32, c: i32) -> i16x8; //imm0_7 + fn msa_insert_h(a: v8i16, b: i32, c: i32) -> v8i16; #[link_name = "llvm.mips.insert.w"] - fn msa_insert_w(a: i32x4, b: i32, c: i32) -> i32x4; //imm0_3 + fn msa_insert_w(a: v4i32, b: i32, c: i32) -> v4i32; #[link_name = "llvm.mips.insert.d"] - fn msa_insert_d(a: i64x2, b: i32, c: i64) -> i64x2; //imm0_1 + fn msa_insert_d(a: v2i64, b: i32, c: i64) -> v2i64; #[link_name = "llvm.mips.insve.b"] - fn msa_insve_b(a: i8x16, b: i32, c: i8x16) -> i8x16; //imm0_15 + fn msa_insve_b(a: v16i8, b: i32, c: v16i8) -> v16i8; #[link_name = "llvm.mips.insve.h"] - fn msa_insve_h(a: i16x8, b: i32, c: i16x8) -> i16x8; //imm0_7 + fn msa_insve_h(a: v8i16, b: i32, c: v8i16) -> v8i16; #[link_name = "llvm.mips.insve.w"] - fn msa_insve_w(a: i32x4, b: i32, c: i32x4) -> i32x4; //imm0_3 + fn msa_insve_w(a: v4i32, b: i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.insve.d"] - fn msa_insve_d(a: i64x2, b: i32, c: i64x2) -> i64x2; //imm0_1 + fn msa_insve_d(a: v2i64, b: i32, c: v2i64) -> v2i64; #[link_name = "llvm.mips.ld.b"] - fn msa_ld_b(mem_addr: *mut i8, b: i32) -> i8x16; //imm_n512_511 + fn msa_ld_b(mem_addr: *mut i8, b: i32) -> v16i8; #[link_name = "llvm.mips.ld.h"] - fn msa_ld_h(mem_addr: *mut i8, b: i32) -> i16x8; //imm_n1024_1022 + fn msa_ld_h(mem_addr: *mut i8, b: i32) -> v8i16; #[link_name = "llvm.mips.ld.w"] - fn msa_ld_w(mem_addr: 
*mut i8, b: i32) -> i32x4; //imm_n2048_2044 + fn msa_ld_w(mem_addr: *mut i8, b: i32) -> v4i32; #[link_name = "llvm.mips.ld.d"] - fn msa_ld_d(mem_addr: *mut i8, b: i32) -> i64x2; //imm_n4096_4088 + fn msa_ld_d(mem_addr: *mut i8, b: i32) -> v2i64; #[link_name = "llvm.mips.ldi.b"] - fn msa_ldi_b(a: i32) -> i8x16; // imm_n512_511 + fn msa_ldi_b(a: i32) -> v16i8; #[link_name = "llvm.mips.ldi.h"] - fn msa_ldi_h(a: i32) -> i16x8; // imm_n512_511 + fn msa_ldi_h(a: i32) -> v8i16; #[link_name = "llvm.mips.ldi.w"] - fn msa_ldi_w(a: i32) -> i32x4; // imm_n512_511 + fn msa_ldi_w(a: i32) -> v4i32; #[link_name = "llvm.mips.ldi.d"] - fn msa_ldi_d(a: i32) -> i64x2; // imm_n512_511 + fn msa_ldi_d(a: i32) -> v2i64; #[link_name = "llvm.mips.madd.q.h"] - fn msa_madd_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_madd_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.madd.q.w"] - fn msa_madd_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_madd_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.maddr.q.h"] - fn msa_maddr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_maddr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.maddr.q.w"] - fn msa_maddr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_maddr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.maddv.b"] - fn msa_maddv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16; + fn msa_maddv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8; #[link_name = "llvm.mips.maddv.h"] - fn msa_maddv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_maddv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.maddv.w"] - fn msa_maddv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_maddv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.maddv.d"] - fn msa_maddv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2; + fn msa_maddv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64; #[link_name = "llvm.mips.max.a.b"] - fn msa_max_a_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_max_a_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.max.a.h"] - fn msa_max_a_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_max_a_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.max.a.w"] - fn msa_max_a_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_max_a_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.max.a.d"] - fn msa_max_a_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_max_a_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.max.s.b"] - fn msa_max_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_max_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.max.s.h"] - fn msa_max_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_max_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.max.s.w"] - fn msa_max_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_max_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.max.s.d"] - fn msa_max_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_max_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.max.u.b"] - fn msa_max_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_max_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.max.u.h"] - fn msa_max_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_max_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.max.u.w"] - fn msa_max_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_max_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.max.u.d"] - fn msa_max_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_max_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.maxi.s.b"] - fn msa_maxi_s_b(a: i8x16, b: i32) -> i8x16; 
//imm_n16_15 + fn msa_maxi_s_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.maxi.s.h"] - fn msa_maxi_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + fn msa_maxi_s_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.maxi.s.w"] - fn msa_maxi_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + fn msa_maxi_s_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.maxi.s.d"] - fn msa_maxi_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + fn msa_maxi_s_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.maxi.u.b"] - fn msa_maxi_u_b(a: u8x16, b: i32) -> u8x16; //imm0_31 + fn msa_maxi_u_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.maxi.u.h"] - fn msa_maxi_u_h(a: u16x8, b: i32) -> u16x8; //imm0_31 + fn msa_maxi_u_h(a: v8u16, b: i32) -> v8u16; #[link_name = "llvm.mips.maxi.u.w"] - fn msa_maxi_u_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + fn msa_maxi_u_w(a: v4u32, b: i32) -> v4u32; #[link_name = "llvm.mips.maxi.u.d"] - fn msa_maxi_u_d(a: u64x2, b: i32) -> u64x2; //imm0_31 + fn msa_maxi_u_d(a: v2u64, b: i32) -> v2u64; #[link_name = "llvm.mips.min.a.b"] - fn msa_min_a_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_min_a_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.min.a.h"] - fn msa_min_a_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_min_a_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.min.a.w"] - fn msa_min_a_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_min_a_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.min.a.d"] - fn msa_min_a_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_min_a_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.min.s.b"] - fn msa_min_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_min_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.min.s.h"] - fn msa_min_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_min_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.min.s.w"] - fn msa_min_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_min_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.min.s.d"] - fn msa_min_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_min_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.min.u.b"] - fn msa_min_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_min_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.min.u.h"] - fn msa_min_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_min_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.min.u.w"] - fn msa_min_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_min_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.min.u.d"] - fn msa_min_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_min_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.mini.s.b"] - fn msa_mini_s_b(a: i8x16, b: i32) -> i8x16; //imm_n16_15 + fn msa_mini_s_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.mini.s.h"] - fn msa_mini_s_h(a: i16x8, b: i32) -> i16x8; //imm_n16_15 + fn msa_mini_s_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.mini.s.w"] - fn msa_mini_s_w(a: i32x4, b: i32) -> i32x4; //imm_n16_15 + fn msa_mini_s_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.mini.s.d"] - fn msa_mini_s_d(a: i64x2, b: i32) -> i64x2; //imm_n16_15 + fn msa_mini_s_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.mini.u.b"] - fn msa_mini_u_b(a: u8x16, b: i32) -> u8x16; //imm0_31 + fn msa_mini_u_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.mini.u.h"] - fn msa_mini_u_h(a: u16x8, b: i32) -> u16x8; //imm0_31 + fn msa_mini_u_h(a: v8u16, b: i32) -> v8u16; #[link_name = "llvm.mips.mini.u.w"] - fn msa_mini_u_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + fn msa_mini_u_w(a: v4u32, b: i32) -> 
v4u32; #[link_name = "llvm.mips.mini.u.d"] - fn msa_mini_u_d(a: u64x2, b: i32) -> u64x2; //imm0_31 + fn msa_mini_u_d(a: v2u64, b: i32) -> v2u64; #[link_name = "llvm.mips.mod.s.b"] - fn msa_mod_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_mod_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.mod.s.h"] - fn msa_mod_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_mod_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.mod.s.w"] - fn msa_mod_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_mod_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.mod.s.d"] - fn msa_mod_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_mod_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.mod.u.b"] - fn msa_mod_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_mod_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.mod.u.h"] - fn msa_mod_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_mod_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.mod.u.w"] - fn msa_mod_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_mod_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.mod.u.d"] - fn msa_mod_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_mod_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.move.v"] - fn msa_move_v(a: i8x16) -> i8x16; + fn msa_move_v(a: v16i8) -> v16i8; #[link_name = "llvm.mips.msub.q.h"] - fn msa_msub_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_msub_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.msub.q.w"] - fn msa_msub_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_msub_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.msubr.q.h"] - fn msa_msubr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_msubr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.msubr.q.w"] - fn msa_msubr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_msubr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.msubv.b"] - fn msa_msubv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16; + fn msa_msubv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8; #[link_name = "llvm.mips.msubv.h"] - fn msa_msubv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_msubv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.msubv.w"] - fn msa_msubv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_msubv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.msubv.d"] - fn msa_msubv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2; + fn msa_msubv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64; #[link_name = "llvm.mips.mul.q.h"] - fn msa_mul_q_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_mul_q_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.mul.q.w"] - fn msa_mul_q_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_mul_q_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.mulr.q.h"] - fn msa_mulr_q_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_mulr_q_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.mulr.q.w"] - fn msa_mulr_q_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_mulr_q_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.mulv.b"] - fn msa_mulv_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_mulv_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.mulv.h"] - fn msa_mulv_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_mulv_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.mulv.w"] - fn msa_mulv_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_mulv_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.mulv.d"] - fn msa_mulv_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_mulv_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.nloc.b"] - 
fn msa_nloc_b(a: i8x16) -> i8x16; + fn msa_nloc_b(a: v16i8) -> v16i8; #[link_name = "llvm.mips.nloc.h"] - fn msa_nloc_h(a: i16x8) -> i16x8; + fn msa_nloc_h(a: v8i16) -> v8i16; #[link_name = "llvm.mips.nloc.w"] - fn msa_nloc_w(a: i32x4) -> i32x4; + fn msa_nloc_w(a: v4i32) -> v4i32; #[link_name = "llvm.mips.nloc.d"] - fn msa_nloc_d(a: i64x2) -> i64x2; + fn msa_nloc_d(a: v2i64) -> v2i64; #[link_name = "llvm.mips.nlzc.b"] - fn msa_nlzc_b(a: i8x16) -> i8x16; + fn msa_nlzc_b(a: v16i8) -> v16i8; #[link_name = "llvm.mips.nlzc.h"] - fn msa_nlzc_h(a: i16x8) -> i16x8; + fn msa_nlzc_h(a: v8i16) -> v8i16; #[link_name = "llvm.mips.nlzc.w"] - fn msa_nlzc_w(a: i32x4) -> i32x4; + fn msa_nlzc_w(a: v4i32) -> v4i32; #[link_name = "llvm.mips.nlzc.d"] - fn msa_nlzc_d(a: i64x2) -> i64x2; + fn msa_nlzc_d(a: v2i64) -> v2i64; #[link_name = "llvm.mips.nor.v"] - fn msa_nor_v(a: u8x16, b: u8x16) -> u8x16; + fn msa_nor_v(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.nori.b"] - fn msa_nori_b(a: u8x16, b: i32) -> u8x16; //imm0_255 + fn msa_nori_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.or.v"] - fn msa_or_v(a: u8x16, b: u8x16) -> u8x16; + fn msa_or_v(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.ori.b"] - fn msa_ori_b(a: u8x16, b: i32) -> u8x16; //imm0_255 + fn msa_ori_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.pckev.b"] - fn msa_pckev_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_pckev_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.pckev.h"] - fn msa_pckev_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_pckev_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.pckev.w"] - fn msa_pckev_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_pckev_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.pckev.d"] - fn msa_pckev_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_pckev_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.pckod.b"] - fn msa_pckod_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_pckod_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.pckod.h"] - fn msa_pckod_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_pckod_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.pckod.w"] - fn msa_pckod_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_pckod_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.pckod.d"] - fn msa_pckod_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_pckod_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.pcnt.b"] - fn msa_pcnt_b(a: i8x16) -> i8x16; + fn msa_pcnt_b(a: v16i8) -> v16i8; #[link_name = "llvm.mips.pcnt.h"] - fn msa_pcnt_h(a: i16x8) -> i16x8; + fn msa_pcnt_h(a: v8i16) -> v8i16; #[link_name = "llvm.mips.pcnt.w"] - fn msa_pcnt_w(a: i32x4) -> i32x4; + fn msa_pcnt_w(a: v4i32) -> v4i32; #[link_name = "llvm.mips.pcnt.d"] - fn msa_pcnt_d(a: i64x2) -> i64x2; + fn msa_pcnt_d(a: v2i64) -> v2i64; #[link_name = "llvm.mips.sat.s.b"] - fn msa_sat_s_b(a: i8x16, b: i32) -> i8x16; //imm0_7 + fn msa_sat_s_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.sat.s.h"] - fn msa_sat_s_h(a: i16x8, b: i32) -> i16x8; //imm0_15 + fn msa_sat_s_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.sat.s.w"] - fn msa_sat_s_w(a: i32x4, b: i32) -> i32x4; //imm0_31 + fn msa_sat_s_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.sat.s.d"] - fn msa_sat_s_d(a: i64x2, b: i32) -> i64x2; //imm0_63 + fn msa_sat_s_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.sat.u.b"] - fn msa_sat_u_b(a: u8x16, b: i32) -> u8x16; //imm0_7 + fn msa_sat_u_b(a: v16u8, b: i32) -> v16u8; #[link_name = "llvm.mips.sat.u.h"] - fn msa_sat_u_h(a: u16x8, b: i32) -> u16x8; //imm0_15 + fn 
msa_sat_u_h(a: v8u16, b: i32) -> v8u16; #[link_name = "llvm.mips.sat.u.w"] - fn msa_sat_u_w(a: u32x4, b: i32) -> u32x4; //imm0_31 + fn msa_sat_u_w(a: v4u32, b: i32) -> v4u32; #[link_name = "llvm.mips.sat.u.d"] - fn msa_sat_u_d(a: u64x2, b: i32) -> u64x2; //imm0_63 + fn msa_sat_u_d(a: v2u64, b: i32) -> v2u64; #[link_name = "llvm.mips.shf.b"] - fn msa_shf_b(a: i8x16, b: i32) -> i8x16; //imm0_255 + fn msa_shf_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.shf.h"] - fn msa_shf_h(a: i16x8, b: i32) -> i16x8; //imm0_255 + fn msa_shf_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.shf.w"] - fn msa_shf_w(a: i32x4, b: i32) -> i32x4; //imm0_255 + fn msa_shf_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.sld.b"] - fn msa_sld_b(a: i8x16, b: i8x16, c: i32) -> i8x16; + fn msa_sld_b(a: v16i8, b: v16i8, c: i32) -> v16i8; #[link_name = "llvm.mips.sld.h"] - fn msa_sld_h(a: i16x8, b: i16x8, c: i32) -> i16x8; + fn msa_sld_h(a: v8i16, b: v8i16, c: i32) -> v8i16; #[link_name = "llvm.mips.sld.w"] - fn msa_sld_w(a: i32x4, b: i32x4, c: i32) -> i32x4; + fn msa_sld_w(a: v4i32, b: v4i32, c: i32) -> v4i32; #[link_name = "llvm.mips.sld.d"] - fn msa_sld_d(a: i64x2, b: i64x2, c: i32) -> i64x2; + fn msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64; #[link_name = "llvm.mips.sldi.b"] - fn msa_sldi_b(a: i8x16, b: i8x16, c: i32) -> i8x16; //imm0_15 + fn msa_sldi_b(a: v16i8, b: v16i8, c: i32) -> v16i8; #[link_name = "llvm.mips.sldi.h"] - fn msa_sldi_h(a: i16x8, b: i16x8, c: i32) -> i16x8; //imm0_7 + fn msa_sldi_h(a: v8i16, b: v8i16, c: i32) -> v8i16; #[link_name = "llvm.mips.sldi.w"] - fn msa_sldi_w(a: i32x4, b: i32x4, c: i32) -> i32x4; //imm0_3 + fn msa_sldi_w(a: v4i32, b: v4i32, c: i32) -> v4i32; #[link_name = "llvm.mips.sldi.d"] - fn msa_sldi_d(a: i64x2, b: i64x2, c: i32) -> i64x2; //imm0_1 + fn msa_sldi_d(a: v2i64, b: v2i64, c: i32) -> v2i64; #[link_name = "llvm.mips.sll.b"] - fn msa_sll_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_sll_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.sll.h"] - fn msa_sll_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_sll_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.sll.w"] - fn msa_sll_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_sll_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.sll.d"] - fn msa_sll_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_sll_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.slli.b"] - fn msa_slli_b(a: i8x16, b: i32) -> i8x16; //imm0_15 + fn msa_slli_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.slli.h"] - fn msa_slli_h(a: i16x8, b: i32) -> i16x8; //imm0_7 + fn msa_slli_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.slli.w"] - fn msa_slli_w(a: i32x4, b: i32) -> i32x4; //imm0_3 + fn msa_slli_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.slli.d"] - fn msa_slli_d(a: i64x2, b: i32) -> i64x2; //imm0_1 + fn msa_slli_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.splat.b"] - fn msa_splat_b(a: i8x16, c: i32) -> i8x16; + fn msa_splat_b(a: v16i8, c: i32) -> v16i8; #[link_name = "llvm.mips.splat.h"] - fn msa_splat_h(a: i16x8, c: i32) -> i16x8; + fn msa_splat_h(a: v8i16, c: i32) -> v8i16; #[link_name = "llvm.mips.splat.w"] - fn msa_splat_w(a: i32x4, w: i32) -> i32x4; + fn msa_splat_w(a: v4i32, w: i32) -> v4i32; #[link_name = "llvm.mips.splat.d"] - fn msa_splat_d(a: i64x2, c: i32) -> i64x2; + fn msa_splat_d(a: v2i64, c: i32) -> v2i64; #[link_name = "llvm.mips.splati.b"] - fn msa_splati_b(a: i8x16, b: i32) -> i8x16; //imm0_15 + fn msa_splati_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.splati.h"] - 
fn msa_splati_h(a: i16x8, b: i32) -> i16x8; //imm0_7 + fn msa_splati_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.splati.w"] - fn msa_splati_w(a: i32x4, b: i32) -> i32x4; //imm0_3 + fn msa_splati_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.splati.d"] - fn msa_splati_d(a: i64x2, b: i32) -> i64x2; //imm0_1 + fn msa_splati_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.sra.b"] - fn msa_sra_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_sra_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.sra.h"] - fn msa_sra_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_sra_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.sra.w"] - fn msa_sra_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_sra_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.sra.d"] - fn msa_sra_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_sra_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.srai.b"] - fn msa_srai_b(a: i8x16, b: i32) -> i8x16; //imm0_7 + fn msa_srai_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.srai.h"] - fn msa_srai_h(a: i16x8, b: i32) -> i16x8; //imm0_15 + fn msa_srai_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.srai.w"] - fn msa_srai_w(a: i32x4, b: i32) -> i32x4; //imm0_31 + fn msa_srai_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.srai.d"] - fn msa_srai_d(a: i64x2, b: i32) -> i64x2; //imm0_63 + fn msa_srai_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.srar.b"] - fn msa_srar_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_srar_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.srar.h"] - fn msa_srar_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_srar_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.srar.w"] - fn msa_srar_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_srar_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.srar.d"] - fn msa_srar_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_srar_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.srari.b"] - fn msa_srari_b(a: i8x16, b: i32) -> i8x16; //imm0_7 + fn msa_srari_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.srari.h"] - fn msa_srari_h(a: i16x8, b: i32) -> i16x8; //imm0_15 + fn msa_srari_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.srari.w"] - fn msa_srari_w(a: i32x4, b: i32) -> i32x4; //imm0_31 + fn msa_srari_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.srari.d"] - fn msa_srari_d(a: i64x2, b: i32) -> i64x2; //imm0_63 + fn msa_srari_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.srl.b"] - fn msa_srl_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_srl_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.srl.h"] - fn msa_srl_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_srl_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.srl.w"] - fn msa_srl_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_srl_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.srl.d"] - fn msa_srl_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_srl_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.srli.b"] - fn msa_srli_b(a: i8x16, b: i32) -> i8x16; //imm0_15 + fn msa_srli_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.srli.h"] - fn msa_srli_h(a: i16x8, b: i32) -> i16x8; //imm0_7 + fn msa_srli_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.srli.w"] - fn msa_srli_w(a: i32x4, b: i32) -> i32x4; //imm0_3 + fn msa_srli_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.srli.d"] - fn msa_srli_d(a: i64x2, b: i32) -> i64x2; //imm0_1 + fn msa_srli_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.srlr.b"] - fn msa_srlr_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_srlr_b(a: 
v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.srlr.h"] - fn msa_srlr_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_srlr_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.srlr.w"] - fn msa_srlr_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_srlr_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.srlr.d"] - fn msa_srlr_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_srlr_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.srlri.b"] - fn msa_srlri_b(a: i8x16, b: i32) -> i8x16; //imm0_7 + fn msa_srlri_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.srlri.h"] - fn msa_srlri_h(a: i16x8, b: i32) -> i16x8; //imm0_15 + fn msa_srlri_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.srlri.w"] - fn msa_srlri_w(a: i32x4, b: i32) -> i32x4; //imm0_31 + fn msa_srlri_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.srlri.d"] - fn msa_srlri_d(a: i64x2, b: i32) -> i64x2; //imm0_63 + fn msa_srlri_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.st.b"] - fn msa_st_b(a: i8x16, mem_addr: *mut i8, imm_s10: i32) -> (); //imm_n512_511 + fn msa_st_b(a: v16i8, mem_addr: *mut i8, imm_s10: i32) -> (); #[link_name = "llvm.mips.st.h"] - fn msa_st_h(a: i16x8, mem_addr: *mut i8, imm_s11: i32) -> (); //imm_n1024_1022 + fn msa_st_h(a: v8i16, mem_addr: *mut i8, imm_s11: i32) -> (); #[link_name = "llvm.mips.st.w"] - fn msa_st_w(a: i32x4, mem_addr: *mut i8, imm_s12: i32) -> (); //imm_n2048_2044 + fn msa_st_w(a: v4i32, mem_addr: *mut i8, imm_s12: i32) -> (); #[link_name = "llvm.mips.st.d"] - fn msa_st_d(a: i64x2, mem_addr: *mut i8, imm_s13: i32) -> (); //imm_n4096_4088 + fn msa_st_d(a: v2i64, mem_addr: *mut i8, imm_s13: i32) -> (); #[link_name = "llvm.mips.subs.s.b"] - fn msa_subs_s_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_subs_s_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.subs.s.h"] - fn msa_subs_s_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_subs_s_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.subs.s.w"] - fn msa_subs_s_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_subs_s_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.subs.s.d"] - fn msa_subs_s_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_subs_s_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.subs.u.b"] - fn msa_subs_u_b(a: u8x16, b: u8x16) -> u8x16; + fn msa_subs_u_b(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.subs.u.h"] - fn msa_subs_u_h(a: u16x8, b: u16x8) -> u16x8; + fn msa_subs_u_h(a: v8u16, b: v8u16) -> v8u16; #[link_name = "llvm.mips.subs.u.w"] - fn msa_subs_u_w(a: u32x4, b: u32x4) -> u32x4; + fn msa_subs_u_w(a: v4u32, b: v4u32) -> v4u32; #[link_name = "llvm.mips.subs.u.d"] - fn msa_subs_u_d(a: u64x2, b: u64x2) -> u64x2; + fn msa_subs_u_d(a: v2u64, b: v2u64) -> v2u64; #[link_name = "llvm.mips.subsus.u.b"] - fn msa_subsus_u_b(a: u8x16, b: i8x16) -> u8x16; + fn msa_subsus_u_b(a: v16u8, b: v16i8) -> v16u8; #[link_name = "llvm.mips.subsus.u.h"] - fn msa_subsus_u_h(a: u16x8, b: i16x8) -> u16x8; + fn msa_subsus_u_h(a: v8u16, b: v8i16) -> v8u16; #[link_name = "llvm.mips.subsus.u.w"] - fn msa_subsus_u_w(a: u32x4, b: i32x4) -> u32x4; + fn msa_subsus_u_w(a: v4u32, b: v4i32) -> v4u32; #[link_name = "llvm.mips.subsus.u.d"] - fn msa_subsus_u_d(a: u64x2, b: i64x2) -> u64x2; + fn msa_subsus_u_d(a: v2u64, b: v2i64) -> v2u64; #[link_name = "llvm.mips.subsuu.s.b"] - fn msa_subsuu_s_b(a: u8x16, b: u8x16) -> i8x16; + fn msa_subsuu_s_b(a: v16u8, b: v16u8) -> v16i8; #[link_name = "llvm.mips.subsuu.s.h"] - fn msa_subsuu_s_h(a: u16x8, b: u16x8) -> i16x8; + fn msa_subsuu_s_h(a: v8u16, b: v8u16) -> v8i16; #[link_name = 
"llvm.mips.subsuu.s.w"] - fn msa_subsuu_s_w(a: u32x4, b: u32x4) -> i32x4; + fn msa_subsuu_s_w(a: v4u32, b: v4u32) -> v4i32; #[link_name = "llvm.mips.subsuu.s.d"] - fn msa_subsuu_s_d(a: u64x2, b: u64x2) -> i64x2; + fn msa_subsuu_s_d(a: v2u64, b: v2u64) -> v2i64; #[link_name = "llvm.mips.subv.b"] - fn msa_subv_b(a: i8x16, b: i8x16) -> i8x16; + fn msa_subv_b(a: v16i8, b: v16i8) -> v16i8; #[link_name = "llvm.mips.subv.h"] - fn msa_subv_h(a: i16x8, b: i16x8) -> i16x8; + fn msa_subv_h(a: v8i16, b: v8i16) -> v8i16; #[link_name = "llvm.mips.subv.w"] - fn msa_subv_w(a: i32x4, b: i32x4) -> i32x4; + fn msa_subv_w(a: v4i32, b: v4i32) -> v4i32; #[link_name = "llvm.mips.subv.d"] - fn msa_subv_d(a: i64x2, b: i64x2) -> i64x2; + fn msa_subv_d(a: v2i64, b: v2i64) -> v2i64; #[link_name = "llvm.mips.subvi.b"] - fn msa_subvi_b(a: i8x16, b: i32) -> i8x16; + fn msa_subvi_b(a: v16i8, b: i32) -> v16i8; #[link_name = "llvm.mips.subvi.h"] - fn msa_subvi_h(a: i16x8, b: i32) -> i16x8; + fn msa_subvi_h(a: v8i16, b: i32) -> v8i16; #[link_name = "llvm.mips.subvi.w"] - fn msa_subvi_w(a: i32x4, b: i32) -> i32x4; + fn msa_subvi_w(a: v4i32, b: i32) -> v4i32; #[link_name = "llvm.mips.subvi.d"] - fn msa_subvi_d(a: i64x2, b: i32) -> i64x2; + fn msa_subvi_d(a: v2i64, b: i32) -> v2i64; #[link_name = "llvm.mips.vshf.b"] - fn msa_vshf_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16; + fn msa_vshf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8; #[link_name = "llvm.mips.vshf.h"] - fn msa_vshf_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8; + fn msa_vshf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16; #[link_name = "llvm.mips.vshf.w"] - fn msa_vshf_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4; + fn msa_vshf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32; #[link_name = "llvm.mips.vshf.d"] - fn msa_vshf_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2; + fn msa_vshf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64; #[link_name = "llvm.mips.xor.v"] - fn msa_xor_v(a: u8x16, b: u8x16) -> u8x16; + fn msa_xor_v(a: v16u8, b: v16u8) -> v16u8; #[link_name = "llvm.mips.xori.b"] - fn msa_xori_b(a: u8x16, b: i32) -> u8x16; //imm0_255 - } + fn msa_xori_b(a: v16u8, b: i32) -> v16u8; +} /// Vector Add Absolute Values. 
/// @@ -1089,8 +1141,8 @@ extern "C" { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.b))] -unsafe fn __msa_add_a_b(a: i8x16, b: i8x16) -> i8x16 { - msa_add_a_b(a, b) +unsafe fn __msa_add_a_b(a: v16i8, b: v16i8) -> v16i8 { + msa_add_a_b(a, ::mem::transmute(b)) } /// Vector Add Absolute Values @@ -1102,8 +1154,8 @@ unsafe fn __msa_add_a_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.h))] -unsafe fn __msa_add_a_h(a: i16x8, b: i16x8) -> i16x8 { - msa_add_a_h(a, b) +unsafe fn __msa_add_a_h(a: v8i16, b: v8i16) -> v8i16 { + msa_add_a_h(a, ::mem::transmute(b)) } /// Vector Add Absolute Values @@ -1115,8 +1167,8 @@ unsafe fn __msa_add_a_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.w))] -unsafe fn __msa_add_a_w(a: i32x4, b: i32x4) -> i32x4 { - msa_add_a_w(a, b) +unsafe fn __msa_add_a_w(a: v4i32, b: v4i32) -> v4i32 { + msa_add_a_w(a, ::mem::transmute(b)) } /// Vector Add Absolute Values @@ -1128,8 +1180,8 @@ unsafe fn __msa_add_a_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.d))] -unsafe fn __msa_add_a_d(a: i64x2, b: i64x2) -> i64x2 { - msa_add_a_d(a, b) +unsafe fn __msa_add_a_d(a: v2i64, b: v2i64) -> v2i64 { + msa_add_a_d(a, ::mem::transmute(b)) } /// Vector Saturated Add of Absolute Values @@ -1141,8 +1193,8 @@ unsafe fn __msa_add_a_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.b))] -unsafe fn __msa_adds_a_b(a: i8x16, b: i8x16) -> i8x16 { - msa_adds_a_b(a, b) +unsafe fn __msa_adds_a_b(a: v16i8, b: v16i8) -> v16i8 { + msa_adds_a_b(a, ::mem::transmute(b)) } /// Vector Saturated Add of Absolute Values @@ -1154,8 +1206,8 @@ unsafe fn __msa_adds_a_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.h))] -unsafe fn __msa_adds_a_h(a: i16x8, b: i16x8) -> i16x8 { - msa_adds_a_h(a, b) +unsafe fn __msa_adds_a_h(a: v8i16, b: v8i16) -> v8i16 { + msa_adds_a_h(a, ::mem::transmute(b)) } /// Vector Saturated Add of Absolute Values @@ -1167,8 +1219,8 @@ unsafe fn __msa_adds_a_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.w))] -unsafe fn __msa_adds_a_w(a: i32x4, b: i32x4) -> i32x4 { - msa_adds_a_w(a, b) +unsafe fn __msa_adds_a_w(a: v4i32, b: v4i32) -> v4i32 { + msa_adds_a_w(a, ::mem::transmute(b)) } /// Vector Saturated Add of Absolute Values @@ -1180,8 +1232,8 @@ unsafe fn __msa_adds_a_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.d))] -unsafe fn __msa_adds_a_d(a: i64x2, b: i64x2) -> i64x2 { - msa_adds_a_d(a, b) +unsafe fn __msa_adds_a_d(a: v2i64, b: v2i64) -> v2i64 { + msa_adds_a_d(a, ::mem::transmute(b)) } /// Vector Signed Saturated Add of Signed Values @@ -1194,8 +1246,8 @@ unsafe fn __msa_adds_a_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.b))] -unsafe fn __msa_adds_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_adds_s_b(a, b) +unsafe fn __msa_adds_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_adds_s_b(a, ::mem::transmute(b)) } /// Vector Signed Saturated Add of Signed Values
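A note on the two patterns above, for reviewers. Each `#[link_name]` attribute in the extern block binds a private Rust declaration to the named LLVM intrinsic; this patch only renames the vector types in those signatures from the old `i8x16` style to the MSA-conventional `v16i8` style imported from `core_arch::simd`. Reduced to a single binding, the shape is as follows (a sketch reusing the file's existing `msa_addv_b` declaration, not new API):

    #[allow(improper_ctypes)]
    extern "C" {
        // `link_name` selects the LLVM intrinsic; the Rust signature
        // must match the vector types LLVM expects for it.
        #[link_name = "llvm.mips.addv.b"]
        fn msa_addv_b(a: v16i8, b: v16i8) -> v16i8;
    }

The adds_s wrappers above saturate rather than wrap on overflow; one i8 lane of adds_s.b behaves like core's saturating add (a scalar model for illustration, not the implementation):

    // One lane of adds_s.b: 100 + 100 clamps to 127 instead of
    // wrapping, and -100 + -100 clamps to -128.
    fn adds_s_b_lane(a: i8, b: i8) -> i8 {
        a.saturating_add(b)
    }

@@ -1208,8 +1260,8 @@ unsafe fn __msa_adds_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.h))] -unsafe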
fn __msa_adds_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_adds_s_h(a, b) +unsafe fn __msa_adds_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_adds_s_h(a, ::mem::transmute(b)) } /// Vector Signed Saturated Add of Signed Values @@ -1222,8 +1274,8 @@ unsafe fn __msa_adds_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.w))] -unsafe fn __msa_adds_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_adds_s_w(a, b) +unsafe fn __msa_adds_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_adds_s_w(a, ::mem::transmute(b)) } /// Vector Signed Saturated Add of Signed Values @@ -1236,8 +1288,8 @@ unsafe fn __msa_adds_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.d))] -unsafe fn __msa_adds_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_adds_s_d(a, b) +unsafe fn __msa_adds_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_adds_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Add of Unsigned Values @@ -1250,8 +1302,8 @@ unsafe fn __msa_adds_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.b))] -unsafe fn __msa_adds_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_adds_u_b(a, b) +unsafe fn __msa_adds_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_adds_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Add of Unsigned Values @@ -1264,8 +1316,8 @@ unsafe fn __msa_adds_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.h))] -unsafe fn __msa_adds_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_adds_u_h(a, b) +unsafe fn __msa_adds_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_adds_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Add of Unsigned Values @@ -1278,8 +1330,8 @@ unsafe fn __msa_adds_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.w))] -unsafe fn __msa_adds_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_adds_u_w(a, b) +unsafe fn __msa_adds_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_adds_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Add of Unsigned Values @@ -1292,8 +1344,8 @@ unsafe fn __msa_adds_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.d))] -unsafe fn __msa_adds_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_adds_u_d(a, b) +unsafe fn __msa_adds_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_adds_u_d(a, ::mem::transmute(b)) } /// Vector Add @@ -1305,8 +1357,8 @@ unsafe fn __msa_adds_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.b))] -unsafe fn __msa_addv_b(a: i8x16, b: i8x16) -> i8x16 { - msa_addv_b(a, b) +unsafe fn __msa_addv_b(a: v16i8, b: v16i8) -> v16i8 { + msa_addv_b(a, ::mem::transmute(b)) } /// Vector Add @@ -1318,8 +1370,8 @@ unsafe fn __msa_addv_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.h))] -unsafe fn __msa_addv_h(a: i16x8, b: i16x8) -> i16x8 { - msa_addv_h(a, b) +unsafe fn __msa_addv_h(a: v8i16, b: v8i16) -> v8i16 { + msa_addv_h(a, ::mem::transmute(b)) } /// Vector Add @@ -1331,8 +1383,8 @@ unsafe fn __msa_addv_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.w))] -unsafe fn __msa_addv_w(a: i32x4, b: i32x4) -> i32x4 { - msa_addv_w(a, b) +unsafe fn __msa_addv_w(a: v4i32, b: v4i32) -> v4i32 { + msa_addv_w(a, ::mem::transmute(b)) } /// 
Vector Add @@ -1344,11 +1396,10 @@ unsafe fn __msa_addv_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.d))] -unsafe fn __msa_addv_d(a: i64x2, b: i64x2) -> i64x2 { - msa_addv_d(a, b) +unsafe fn __msa_addv_d(a: v2i64, b: v2i64) -> v2i64 { + msa_addv_d(a, ::mem::transmute(b)) } - /// Immediate Add /// /// The 5-bit immediate unsigned value u5 is added to the elements @@ -1359,7 +1410,7 @@ unsafe fn __msa_addv_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.b, imm5 = 0b10111))] #[rustc_args_required_const(1)] -unsafe fn __msa_addvi_b(a: i8x16, imm5: u32) -> i8x16 { +unsafe fn __msa_addvi_b(a: v16i8, imm5: u32) -> v16i8 { macro_rules! call { ($imm5:expr) => { msa_addvi_b(a, $imm5) @@ -1378,7 +1429,7 @@ unsafe fn __msa_addvi_b(a: i8x16, imm5: u32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.h, imm5 = 0b10111))] #[rustc_args_required_const(1)] -unsafe fn __msa_addvi_h(a: i16x8, imm5: u32) -> i16x8 { +unsafe fn __msa_addvi_h(a: v8i16, imm5: u32) -> v8i16 { macro_rules! call { ($imm5:expr) => { msa_addvi_h(a, $imm5) @@ -1397,7 +1448,7 @@ unsafe fn __msa_addvi_h(a: i16x8, imm5: u32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.w, imm5 = 0b10111))] #[rustc_args_required_const(1)] -unsafe fn __msa_addvi_w(a: i32x4, imm5: u32) -> i32x4 { +unsafe fn __msa_addvi_w(a: v4i32, imm5: u32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_addvi_w(a, $imm5) @@ -1416,7 +1467,7 @@ unsafe fn __msa_addvi_w(a: i32x4, imm5: u32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.d, imm5 = 0b10111))] #[rustc_args_required_const(1)] -unsafe fn __msa_addvi_d(a: i64x2, imm5: u32) -> i64x2 { +unsafe fn __msa_addvi_d(a: v2i64, imm5: u32) -> v2i64 { macro_rules! call { ($imm5:expr) => { msa_addvi_d(a, $imm5) @@ -1435,8 +1486,8 @@ unsafe fn __msa_addvi_d(a: i64x2, imm5: u32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(and.v))] -unsafe fn __msa_and_v(a: u8x16, b: u8x16) -> u8x16 { - msa_and_v(a, b) +unsafe fn __msa_and_v(a: v16u8, b: v16u8) -> v16u8 { + msa_and_v(a, ::mem::transmute(b)) } /// Immediate Logical And @@ -1449,7 +1500,7 @@ unsafe fn __msa_and_v(a: u8x16, b: u8x16) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(andi.b, imm8 = 0b10010111))] #[rustc_args_required_const(1)] -unsafe fn __msa_andi_b(a: u8x16, imm8: u32) -> u8x16 { +unsafe fn __msa_andi_b(a: v16u8, imm8: u32) -> v16u8 { macro_rules! 
call { ($imm8:expr) => { msa_andi_b(a, $imm8) @@ -1467,8 +1518,8 @@ unsafe fn __msa_andi_b(a: u8x16, imm8: u32) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.b))] -unsafe fn __msa_asub_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_asub_s_b(a, b) +unsafe fn __msa_asub_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_asub_s_b(a, ::mem::transmute(b)) } /// Vector Absolute Values of Signed Subtract @@ -1480,8 +1531,8 @@ unsafe fn __msa_asub_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.h))] -unsafe fn __msa_asub_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_asub_s_h(a, b) +unsafe fn __msa_asub_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_asub_s_h(a, ::mem::transmute(b)) } /// Vector Absolute Values of Signed Subtract @@ -1493,8 +1544,8 @@ unsafe fn __msa_asub_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.w))] -unsafe fn __msa_asub_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_asub_s_w(a, b) +unsafe fn __msa_asub_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_asub_s_w(a, ::mem::transmute(b)) } /// Vector Absolute Values of Signed Subtract @@ -1506,8 +1557,8 @@ unsafe fn __msa_asub_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.d))] -unsafe fn __msa_asub_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_asub_s_d(a, b) +unsafe fn __msa_asub_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_asub_s_d(a, ::mem::transmute(b)) } /// Vector Absolute Values of Unsigned Subtract @@ -1519,8 +1570,8 @@ unsafe fn __msa_asub_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.b))] -unsafe fn __msa_asub_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_asub_u_b(a, b) +unsafe fn __msa_asub_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_asub_u_b(a, ::mem::transmute(b)) } /// Vector Absolute Values of Unsigned Subtract @@ -1532,8 +1583,8 @@ unsafe fn __msa_asub_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.h))] -unsafe fn __msa_asub_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_asub_u_h(a, b) +unsafe fn __msa_asub_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_asub_u_h(a, ::mem::transmute(b)) } /// Vector Absolute Values of Unsigned Subtract @@ -1545,8 +1596,8 @@ unsafe fn __msa_asub_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.w))] -unsafe fn __msa_asub_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_asub_u_w(a, b) +unsafe fn __msa_asub_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_asub_u_w(a, ::mem::transmute(b)) } /// Vector Absolute Values of Unsigned Subtract @@ -1558,8 +1609,8 @@ unsafe fn __msa_asub_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.d))] -unsafe fn __msa_asub_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_asub_u_d(a, b) +unsafe fn __msa_asub_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_asub_u_d(a, ::mem::transmute(b)) } /// Vector Signed Average @@ -1573,8 +1624,8 @@ unsafe fn __msa_asub_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.b))] -unsafe fn __msa_ave_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_ave_s_b(a, b) +unsafe fn __msa_ave_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_ave_s_b(a, ::mem::transmute(b)) } /// Vector Signed Average @@ -1588,8 +1639,8 @@ unsafe fn __msa_ave_s_b(a: i8x16, b: i8x16) -> 
i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.h))] -unsafe fn __msa_ave_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_ave_s_h(a, b) +unsafe fn __msa_ave_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_ave_s_h(a, ::mem::transmute(b)) } /// Vector Signed Average @@ -1603,8 +1654,8 @@ unsafe fn __msa_ave_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.w))] -unsafe fn __msa_ave_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_ave_s_w(a, b) +unsafe fn __msa_ave_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_ave_s_w(a, ::mem::transmute(b)) } /// Vector Signed Average @@ -1618,8 +1669,8 @@ unsafe fn __msa_ave_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.d))] -unsafe fn __msa_ave_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_ave_s_d(a, b) +unsafe fn __msa_ave_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_ave_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Average @@ -1633,8 +1684,8 @@ unsafe fn __msa_ave_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.b))] -unsafe fn __msa_ave_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_ave_u_b(a, b) +unsafe fn __msa_ave_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_ave_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Average @@ -1648,8 +1699,8 @@ unsafe fn __msa_ave_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.h))] -unsafe fn __msa_ave_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_ave_u_h(a, b) +unsafe fn __msa_ave_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_ave_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Average @@ -1663,8 +1714,8 @@ unsafe fn __msa_ave_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.w))] -unsafe fn __msa_ave_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_ave_u_w(a, b) +unsafe fn __msa_ave_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_ave_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Average @@ -1678,8 +1729,8 @@ unsafe fn __msa_ave_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.d))] -unsafe fn __msa_ave_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_ave_u_d(a, b) +unsafe fn __msa_ave_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_ave_u_d(a, ::mem::transmute(b)) } /// Vector Signed Average Rounded @@ -1694,8 +1745,8 @@ unsafe fn __msa_ave_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.b))] -unsafe fn __msa_aver_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_aver_s_b(a, b) +unsafe fn __msa_aver_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_aver_s_b(a, ::mem::transmute(b)) } /// Vector Signed Average Rounded @@ -1710,8 +1761,8 @@ unsafe fn __msa_aver_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.h))] -unsafe fn __msa_aver_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_aver_s_h(a, b) +unsafe fn __msa_aver_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_aver_s_h(a, ::mem::transmute(b)) } /// Vector Signed Average Rounded @@ -1726,8 +1777,8 @@ unsafe fn __msa_aver_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.w))] -unsafe fn __msa_aver_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_aver_s_w(a, b) +unsafe fn __msa_aver_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_aver_s_w(a, ::mem::transmute(b)) } 
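A note on the wrapper bodies: every rewritten wrapper forwards its second operand through `::mem::transmute(b)`. With the extern declarations above now using the same v-prefixed types, this transmute is a zero-cost bitwise cast between identically sized vector types, apparently kept so all wrapper bodies stay uniform during the type migration. On semantics, ave_s truncates the average while aver_s rounds it; a widened scalar model of one i8 lane makes the difference concrete (illustrative only; the sum is computed in i16 so it cannot overflow):

    // ave_s.b truncates via arithmetic shift; aver_s.b adds one
    // before shifting, i.e. rounds the average up (ceiling).
    fn ave_s_b_lane(a: i8, b: i8) -> i8 {
        ((a as i16 + b as i16) >> 1) as i8
    }
    fn aver_s_b_lane(a: i8, b: i8) -> i8 {
        ((a as i16 + b as i16 + 1) >> 1) as i8
    }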
/// Vector Signed Average Rounded @@ -1742,8 +1793,8 @@ unsafe fn __msa_aver_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.d))] -unsafe fn __msa_aver_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_aver_s_d(a, b) +unsafe fn __msa_aver_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_aver_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Average Rounded @@ -1758,8 +1809,8 @@ unsafe fn __msa_aver_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.b))] -unsafe fn __msa_aver_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_aver_u_b(a, b) +unsafe fn __msa_aver_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_aver_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Average Rounded @@ -1774,8 +1825,8 @@ unsafe fn __msa_aver_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.h))] -unsafe fn __msa_aver_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_aver_u_h(a, b) +unsafe fn __msa_aver_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_aver_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Average Rounded @@ -1790,8 +1841,8 @@ unsafe fn __msa_aver_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.w))] -unsafe fn __msa_aver_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_aver_u_w(a, b) +unsafe fn __msa_aver_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_aver_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Average Rounded @@ -1806,8 +1857,8 @@ unsafe fn __msa_aver_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.d))] -unsafe fn __msa_aver_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_aver_u_d(a, b) +unsafe fn __msa_aver_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_aver_u_d(a, ::mem::transmute(b)) } /// Vector Bit Clear @@ -1820,8 +1871,8 @@ unsafe fn __msa_aver_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.b))] -unsafe fn __msa_bclr_b(a: u8x16, b: u8x16) -> u8x16 { - msa_bclr_b(a, b) +unsafe fn __msa_bclr_b(a: v16u8, b: v16u8) -> v16u8 { + msa_bclr_b(a, ::mem::transmute(b)) } /// Vector Bit Clear @@ -1834,8 +1885,8 @@ unsafe fn __msa_bclr_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.h))] -unsafe fn __msa_bclr_h(a: u16x8, b: u16x8) -> u16x8 { - msa_bclr_h(a, b) +unsafe fn __msa_bclr_h(a: v8u16, b: v8u16) -> v8u16 { + msa_bclr_h(a, ::mem::transmute(b)) } /// Vector Bit Clear @@ -1848,8 +1899,8 @@ unsafe fn __msa_bclr_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.w))] -unsafe fn __msa_bclr_w(a: u32x4, b: u32x4) -> u32x4 { - msa_bclr_w(a, b) +unsafe fn __msa_bclr_w(a: v4u32, b: v4u32) -> v4u32 { + msa_bclr_w(a, ::mem::transmute(b)) } /// Vector Bit Clear @@ -1862,8 +1913,8 @@ unsafe fn __msa_bclr_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.d))] -unsafe fn __msa_bclr_d(a: u64x2, b: u64x2) -> u64x2 { - msa_bclr_d(a, b) +unsafe fn __msa_bclr_d(a: v2u64, b: v2u64) -> v2u64 { + msa_bclr_d(a, ::mem::transmute(b)) } /// Immediate Bit Clear
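Every immediate-form intrinsic from here on (bclri, bnegi, bseti, ceqi, clei, clti, ...) follows the same recipe: `#[rustc_args_required_const(N)]` makes the caller pass a literal, and the local `call!` macro is expanded by one of the `constify_imm*!` helpers from the new macros.rs so that LLVM receives the immediate as a compile-time constant. The helpers themselves are not visible in this hunk; a plausible sketch of their shape, assuming a match over every representable value in the style of the existing x86 constify macros rather than a copy of macros.rs, would be:

    // Expands `call!(N)` with a literal for each possible 3-bit
    // immediate, so LLVM never sees a runtime value.
    macro_rules! constify_imm3 {
        ($imm:expr, $call:ident) => {
            match $imm & 0b111 {
                0 => $call!(0),
                1 => $call!(1),
                2 => $call!(2),
                3 => $call!(3),
                4 => $call!(4),
                5 => $call!(5),
                6 => $call!(6),
                _ => $call!(7),
            }
        };
    }

@@ -1876,7 +1927,7 @@ unsafe fn __msa_bclr_d(a: u64x2, b: u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bclri_b(a: u8x16, imm3: i32) -> u8x16 {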
+unsafe fn __msa_bclri_b(a: v16u8, imm3: i32) -> v16u8 { macro_rules! call { ($imm3:expr) => { msa_bclri_b(a, $imm3) @@ -1895,7 +1946,7 @@ unsafe fn __msa_bclri_b(a: u8x16, imm3: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bclri_h(a: u16x8, imm4: i32) -> u16x8 { +unsafe fn __msa_bclri_h(a: v8u16, imm4: i32) -> v8u16 { macro_rules! call { ($imm4:expr) => { msa_bclri_h(a, $imm4) @@ -1914,7 +1965,7 @@ unsafe fn __msa_bclri_h(a: u16x8, imm4: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bclri_w(a: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_bclri_w(a: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { msa_bclri_w(a, $imm5) @@ -1933,7 +1984,7 @@ unsafe fn __msa_bclri_w(a: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bclri_d(a: u64x2, imm6: i32) -> u64x2 { +unsafe fn __msa_bclri_d(a: v2u64, imm6: i32) -> v2u64 { macro_rules! call { ($imm6:expr) => { msa_bclri_d(a, $imm6) @@ -1952,8 +2003,8 @@ unsafe fn __msa_bclri_d(a: u64x2, imm6: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.b))] -unsafe fn __msa_binsl_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { - msa_binsl_b(a, b, c) +unsafe fn __msa_binsl_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + msa_binsl_b(a, ::mem::transmute(b), c) } /// Vector Bit Insert Left @@ -1966,8 +2017,8 @@ unsafe fn __msa_binsl_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.h))] -unsafe fn __msa_binsl_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8 { - msa_binsl_h(a, b, c) +unsafe fn __msa_binsl_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16 { + msa_binsl_h(a, ::mem::transmute(b), c) } /// Vector Bit Insert Left @@ -1980,8 +2031,8 @@ unsafe fn __msa_binsl_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.w))] -unsafe fn __msa_binsl_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4 { - msa_binsl_w(a, b, c) +unsafe fn __msa_binsl_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32 { + msa_binsl_w(a, ::mem::transmute(b), c) } /// Vector Bit Insert Left @@ -1994,8 +2045,8 @@ unsafe fn __msa_binsl_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.d))] -unsafe fn __msa_binsl_d(a: u64x2, b: u64x2, c:u64x2) -> u64x2 { - msa_binsl_d(a, b, c) +unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { + msa_binsl_d(a, ::mem::transmute(b), c) } /// Immediate Bit Insert Left @@ -2008,10 +2059,10 @@ unsafe fn __msa_binsl_d(a: u64x2, b: u64x2, c:u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsli_b(a: u8x16, b: u8x16, imm3: i32) -> u8x16 { +unsafe fn __msa_binsli_b(a: v16u8, b: v16u8, imm3: i32) -> v16u8 { macro_rules! 
call { ($imm3:expr) => { - msa_binsli_b(a, b, $imm3) + msa_binsli_b(a, ::mem::transmute(b), $imm3) }; } constify_imm3!(imm3, call) @@ -2027,10 +2078,10 @@ unsafe fn __msa_binsli_b(a: u8x16, b: u8x16, imm3: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsli_h(a: u16x8, b: u16x8, imm4: i32) -> u16x8 { +unsafe fn __msa_binsli_h(a: v8u16, b: v8u16, imm4: i32) -> v8u16 { macro_rules! call { ($imm4:expr) => { - msa_binsli_h(a, b, $imm4) + msa_binsli_h(a, ::mem::transmute(b), $imm4) }; } constify_imm4!(imm4, call) @@ -2046,10 +2097,10 @@ unsafe fn __msa_binsli_h(a: u16x8, b: u16x8, imm4: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsli_w(a: u32x4, b: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_binsli_w(a: v4u32, b: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { - msa_binsli_w(a, b, $imm5) + msa_binsli_w(a, ::mem::transmute(b), $imm5) }; } constify_imm5!(imm5, call) @@ -2065,10 +2116,10 @@ unsafe fn __msa_binsli_w(a: u32x4, b: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsli_d(a: u64x2, b: u64x2, imm6: i32) -> u64x2 { +unsafe fn __msa_binsli_d(a: v2u64, b: v2u64, imm6: i32) -> v2u64 { macro_rules! call { ($imm6:expr) => { - msa_binsli_d(a, b, $imm6) + msa_binsli_d(a, ::mem::transmute(b), $imm6) }; } constify_imm6!(imm6, call) @@ -2084,8 +2135,8 @@ unsafe fn __msa_binsli_d(a: u64x2, b: u64x2, imm6: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.b))] -unsafe fn __msa_binsr_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { - msa_binsr_b(a, b, c) +unsafe fn __msa_binsr_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + msa_binsr_b(a, ::mem::transmute(b), c) } /// Vector Bit Insert Right @@ -2098,8 +2149,8 @@ unsafe fn __msa_binsr_b(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.h))] -unsafe fn __msa_binsr_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8 { - msa_binsr_h(a, b, c) +unsafe fn __msa_binsr_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16 { + msa_binsr_h(a, ::mem::transmute(b), c) } /// Vector Bit Insert Right @@ -2112,8 +2163,8 @@ unsafe fn __msa_binsr_h(a: u16x8, b: u16x8, c: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.w))] -unsafe fn __msa_binsr_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4 { - msa_binsr_w(a, b, c) +unsafe fn __msa_binsr_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32 { + msa_binsr_w(a, ::mem::transmute(b), c) } /// Vector Bit Insert Right @@ -2126,8 +2177,8 @@ unsafe fn __msa_binsr_w(a: u32x4, b: u32x4, c: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.d))] -unsafe fn __msa_binsr_d(a: u64x2, b: u64x2, c:u64x2) -> u64x2 { - msa_binsr_d(a, b, c) +unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { + msa_binsr_d(a, ::mem::transmute(b), c) } /// Immediate Bit Insert Right @@ -2140,10 +2191,10 @@ unsafe fn __msa_binsr_d(a: u64x2, b: u64x2, c:u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsri_b(a: u8x16, b: u8x16, imm3: i32) -> u8x16 { +unsafe fn __msa_binsri_b(a: v16u8, b: v16u8, imm3: i32) -> 
v16u8 { macro_rules! call { ($imm3:expr) => { - msa_binsri_b(a, b, $imm3) + msa_binsri_b(a, ::mem::transmute(b), $imm3) }; } constify_imm3!(imm3, call) @@ -2159,10 +2210,10 @@ unsafe fn __msa_binsri_b(a: u8x16, b: u8x16, imm3: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsri_h(a: u16x8, b: u16x8, imm4: i32) -> u16x8 { +unsafe fn __msa_binsri_h(a: v8u16, b: v8u16, imm4: i32) -> v8u16 { macro_rules! call { ($imm4:expr) => { - msa_binsri_h(a, b, $imm4) + msa_binsri_h(a, ::mem::transmute(b), $imm4) }; } constify_imm4!(imm4, call) @@ -2178,10 +2229,10 @@ unsafe fn __msa_binsri_h(a: u16x8, b: u16x8, imm4: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsri_w(a: u32x4, b: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_binsri_w(a: v4u32, b: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { - msa_binsri_w(a, b, $imm5) + msa_binsri_w(a, ::mem::transmute(b), $imm5) }; } constify_imm5!(imm5, call) @@ -2197,10 +2248,10 @@ unsafe fn __msa_binsri_w(a: u32x4, b: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_binsri_d(a: u64x2, b: u64x2, imm6: i32) -> u64x2 { +unsafe fn __msa_binsri_d(a: v2u64, b: v2u64, imm6: i32) -> v2u64 { macro_rules! call { ($imm6:expr) => { - msa_binsri_d(a, b, $imm6) + msa_binsri_d(a, ::mem::transmute(b), $imm6) }; } constify_imm6!(imm6, call) @@ -2216,8 +2267,8 @@ unsafe fn __msa_binsri_d(a: u64x2, b: u64x2, imm6: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmnz.v))] -unsafe fn __msa_bmnz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { - msa_bmnz_v(a, b, c) +unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + msa_bmnz_v(a, ::mem::transmute(b), c) } /// Immediate Bit Move If Not Zero @@ -2230,10 +2281,10 @@ unsafe fn __msa_bmnz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_bmnzi_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 { - macro_rules! call { +unsafe fn __msa_bmnzi_b(a: v16u8, b: v16u8, imm8: i32) -> v16u8 { + macro_rules! call { ($imm8:expr) => { - msa_bmnzi_b(a, b, $imm8) + msa_bmnzi_b(a, ::mem::transmute(b), $imm8) }; } constify_imm8!(imm8, call) @@ -2249,8 +2300,8 @@ unsafe fn __msa_bmnzi_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmz.v))] -unsafe fn __msa_bmz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { - msa_bmz_v(a, b, c) +unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + msa_bmz_v(a, ::mem::transmute(b), c) } /// Immediate Bit Move If Zero @@ -2263,10 +2314,10 @@ unsafe fn __msa_bmz_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmzi.b, imm8 = 0b11111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_bmzi_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 { - macro_rules! call { +unsafe fn __msa_bmzi_b(a: v16u8, b: v16u8, imm8: i32) -> v16u8 { + macro_rules! 
call { ($imm8:expr) => { - msa_bmzi_b(a, b, $imm8) + msa_bmzi_b(a, ::mem::transmute(b), $imm8) }; } constify_imm8!(imm8, call) @@ -2282,8 +2333,8 @@ unsafe fn __msa_bmzi_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.b))] -unsafe fn __msa_bneg_b(a: u8x16, b: u8x16) -> u8x16 { - msa_bneg_b(a, b) +unsafe fn __msa_bneg_b(a: v16u8, b: v16u8) -> v16u8 { + msa_bneg_b(a, ::mem::transmute(b)) } /// Vector Bit Negate @@ -2296,8 +2347,8 @@ unsafe fn __msa_bneg_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.h))] -unsafe fn __msa_bneg_h(a: u16x8, b: u16x8) -> u16x8 { - msa_bneg_h(a, b) +unsafe fn __msa_bneg_h(a: v8u16, b: v8u16) -> v8u16 { + msa_bneg_h(a, ::mem::transmute(b)) } /// Vector Bit Negate @@ -2310,8 +2361,8 @@ unsafe fn __msa_bneg_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.w))] -unsafe fn __msa_bneg_w(a: u32x4, b: u32x4) -> u32x4 { - msa_bneg_w(a, b) +unsafe fn __msa_bneg_w(a: v4u32, b: v4u32) -> v4u32 { + msa_bneg_w(a, ::mem::transmute(b)) } /// Vector Bit Negate @@ -2324,8 +2375,8 @@ unsafe fn __msa_bneg_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.d))] -unsafe fn __msa_bneg_d(a: u64x2, b: u64x2) -> u64x2 { - msa_bneg_d(a, b) +unsafe fn __msa_bneg_d(a: v2u64, b: v2u64) -> v2u64 { + msa_bneg_d(a, ::mem::transmute(b)) } /// Immediate Bit Negate @@ -2338,7 +2389,7 @@ unsafe fn __msa_bneg_d(a: u64x2, b: u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bnegi_b(a: u8x16, imm3: i32) -> u8x16 { +unsafe fn __msa_bnegi_b(a: v16u8, imm3: i32) -> v16u8 { macro_rules! call { ($imm3:expr) => { msa_bnegi_b(a, $imm3) @@ -2357,7 +2408,7 @@ unsafe fn __msa_bnegi_b(a: u8x16, imm3: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bnegi_h(a: u16x8, imm4: i32) -> u16x8 { +unsafe fn __msa_bnegi_h(a: v8u16, imm4: i32) -> v8u16 { macro_rules! call { ($imm4:expr) => { msa_bnegi_h(a, $imm4) @@ -2376,7 +2427,7 @@ unsafe fn __msa_bnegi_h(a: u16x8, imm4: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bnegi_w(a: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_bnegi_w(a: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { msa_bnegi_w(a, $imm5)
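The bneg family toggles a single bit per element: the bit position comes from the corresponding element of `b` modulo the element width, or from the immediate in the bnegi forms. A scalar model of one u8 lane (an illustration of the documented semantics, not the implementation):

    // bneg.b on one lane: flip bit (b % 8) of a.
    fn bneg_b_lane(a: u8, b: u8) -> u8 {
        a ^ (1 << (b % 8))
    }

@@ -2395,7 +2446,7 @@ unsafe fn __msa_bnegi_w(a: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bnegi_d(a: u64x2, imm6: i32) -> u64x2 { +unsafe fn __msa_bnegi_d(a: v2u64, imm6: i32) -> v2u64 { macro_rules!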
call { ($imm6:expr) => { msa_bnegi_d(a, $imm6) @@ -2411,7 +2462,7 @@ unsafe fn __msa_bnegi_d(a: u64x2, imm6: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.b))] -unsafe fn __msa_bnz_b(a: u8x16) -> i32 { +unsafe fn __msa_bnz_b(a: v16u8) -> i32 { msa_bnz_b(a) } @@ -2422,7 +2473,7 @@ unsafe fn __msa_bnz_b(a: u8x16) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.h))] -unsafe fn __msa_bnz_h(a: u16x8) -> i32 { +unsafe fn __msa_bnz_h(a: v8u16) -> i32 { msa_bnz_h(a) } @@ -2433,7 +2484,7 @@ unsafe fn __msa_bnz_h(a: u16x8) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.w))] -unsafe fn __msa_bnz_w(a: u32x4) -> i32 { +unsafe fn __msa_bnz_w(a: v4u32) -> i32 { msa_bnz_w(a) } @@ -2444,7 +2495,7 @@ unsafe fn __msa_bnz_w(a: u32x4) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.d))] -unsafe fn __msa_bnz_d(a: u64x2) -> i32 { +unsafe fn __msa_bnz_d(a: v2u64) -> i32 { msa_bnz_d(a) } @@ -2456,7 +2507,7 @@ unsafe fn __msa_bnz_d(a: u64x2) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.v))] -unsafe fn __msa_bnz_v(a: u8x16) -> i32 { +unsafe fn __msa_bnz_v(a: v16u8) -> i32 { msa_bnz_v(a) } @@ -2470,8 +2521,8 @@ unsafe fn __msa_bnz_v(a: u8x16) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bsel.v))] -unsafe fn __msa_bsel_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { - msa_bsel_v(a, b, c) +unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + msa_bsel_v(a, ::mem::transmute(b), c) } /// Immediate Bit Select @@ -2484,10 +2535,10 @@ unsafe fn __msa_bsel_v(a: u8x16, b: u8x16, c: u8x16) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_bseli_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 { - macro_rules! call { +unsafe fn __msa_bseli_b(a: v16u8, b: v16u8, imm8: i32) -> v16u8 { + macro_rules! 
call { ($imm8:expr) => { - msa_bseli_b(a, b, $imm8) + msa_bseli_b(a, ::mem::transmute(b), $imm8) }; } constify_imm8!(imm8, call) @@ -2503,8 +2554,8 @@ unsafe fn __msa_bseli_b(a: u8x16, b: u8x16, imm8: i32) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.b))] -unsafe fn __msa_bset_b(a: u8x16, b: u8x16) -> u8x16 { - msa_bset_b(a, b) +unsafe fn __msa_bset_b(a: v16u8, b: v16u8) -> v16u8 { + msa_bset_b(a, ::mem::transmute(b)) } /// Vector Bit Set @@ -2517,8 +2568,8 @@ unsafe fn __msa_bset_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.h))] -unsafe fn __msa_bset_h(a: u16x8, b: u16x8) -> u16x8 { - msa_bset_h(a, b) +unsafe fn __msa_bset_h(a: v8u16, b: v8u16) -> v8u16 { + msa_bset_h(a, ::mem::transmute(b)) } /// Vector Bit Set @@ -2531,8 +2582,8 @@ unsafe fn __msa_bset_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.w))] -unsafe fn __msa_bset_w(a: u32x4, b: u32x4) -> u32x4 { - msa_bset_w(a, b) +unsafe fn __msa_bset_w(a: v4u32, b: v4u32) -> v4u32 { + msa_bset_w(a, ::mem::transmute(b)) } /// Vector Bit Set @@ -2545,8 +2596,8 @@ unsafe fn __msa_bset_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.d))] -unsafe fn __msa_bset_d(a: u64x2, b: u64x2) -> u64x2 { - msa_bset_d(a, b) +unsafe fn __msa_bset_d(a: v2u64, b: v2u64) -> v2u64 { + msa_bset_d(a, ::mem::transmute(b)) } /// Immediate Bit Set @@ -2559,7 +2610,7 @@ unsafe fn __msa_bset_d(a: u64x2, b: u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bseti_b(a: u8x16, imm3: i32) -> u8x16 { +unsafe fn __msa_bseti_b(a: v16u8, imm3: i32) -> v16u8 { macro_rules! call { ($imm3:expr) => { msa_bseti_b(a, $imm3) @@ -2578,7 +2629,7 @@ unsafe fn __msa_bseti_b(a: u8x16, imm3: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bseti_h(a: u16x8, imm4: i32) -> u16x8 { +unsafe fn __msa_bseti_h(a: v8u16, imm4: i32) -> v8u16 { macro_rules! call { ($imm4:expr) => { msa_bseti_h(a, $imm4) @@ -2597,7 +2648,7 @@ unsafe fn __msa_bseti_h(a: u16x8, imm4: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bseti_w(a: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_bseti_w(a: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { msa_bseti_w(a, $imm5) @@ -2616,7 +2667,7 @@ unsafe fn __msa_bseti_w(a: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_bseti_d(a: u64x2, imm6: i32) -> u64x2 { +unsafe fn __msa_bseti_d(a: v2u64, imm6: i32) -> v2u64 { macro_rules! 
call { ($imm6:expr) => { msa_bseti_d(a, $imm6) @@ -2632,7 +2683,7 @@ unsafe fn __msa_bseti_d(a: u64x2, imm6: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.b))] -unsafe fn __msa_bz_b(a: u8x16) -> i32 { +unsafe fn __msa_bz_b(a: v16u8) -> i32 { msa_bz_b(a) } @@ -2643,7 +2694,7 @@ unsafe fn __msa_bz_b(a: u8x16) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.h))] -unsafe fn __msa_bz_h(a: u16x8) -> i32 { +unsafe fn __msa_bz_h(a: v8u16) -> i32 { msa_bz_h(a) } @@ -2654,7 +2705,7 @@ unsafe fn __msa_bz_h(a: u16x8) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.w))] -unsafe fn __msa_bz_w(a: u32x4) -> i32 { +unsafe fn __msa_bz_w(a: v4u32) -> i32 { msa_bz_w(a) } @@ -2665,7 +2716,7 @@ unsafe fn __msa_bz_w(a: u32x4) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.d))] -unsafe fn __msa_bz_d(a: u64x2) -> i32 { +unsafe fn __msa_bz_d(a: v2u64) -> i32 { msa_bz_d(a) } @@ -2677,7 +2728,7 @@ unsafe fn __msa_bz_d(a: u64x2) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.v))] -unsafe fn __msa_bz_v(a: u8x16) -> i32 { +unsafe fn __msa_bz_v(a: v16u8) -> i32 { msa_bz_v(a) } @@ -2690,8 +2741,8 @@ unsafe fn __msa_bz_v(a: u8x16) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.b))] -unsafe fn __msa_ceq_b(a: i8x16, b: i8x16) -> i8x16 { - msa_ceq_b(a, b) +unsafe fn __msa_ceq_b(a: v16i8, b: v16i8) -> v16i8 { + msa_ceq_b(a, ::mem::transmute(b)) } /// Vector Compare Equal @@ -2703,8 +2754,8 @@ unsafe fn __msa_ceq_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.h))] -unsafe fn __msa_ceq_h(a: i16x8, b: i16x8) -> i16x8 { - msa_ceq_h(a, b) +unsafe fn __msa_ceq_h(a: v8i16, b: v8i16) -> v8i16 { + msa_ceq_h(a, ::mem::transmute(b)) } /// Vector Compare Equal @@ -2716,8 +2767,8 @@ unsafe fn __msa_ceq_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.w))] -unsafe fn __msa_ceq_w(a: i32x4, b: i32x4) -> i32x4 { - msa_ceq_w(a, b) +unsafe fn __msa_ceq_w(a: v4i32, b: v4i32) -> v4i32 { + msa_ceq_w(a, ::mem::transmute(b)) } /// Vector Compare Equal @@ -2729,8 +2780,8 @@ unsafe fn __msa_ceq_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.d))] -unsafe fn __msa_ceq_d(a: i64x2, b: i64x2) -> i64x2 { - msa_ceq_d(a, b) +unsafe fn __msa_ceq_d(a: v2i64, b: v2i64) -> v2i64 { + msa_ceq_d(a, ::mem::transmute(b)) } /// Immediate Compare Equal @@ -2743,7 +2794,7 @@ unsafe fn __msa_ceq_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.b, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ceqi_b(a: i8x16, imm_s5: i32) -> i8x16 { +unsafe fn __msa_ceqi_b(a: v16i8, imm_s5: i32) -> v16i8 { macro_rules! call { ($imm_s5:expr) => { msa_ceqi_b(a, $imm_s5) @@ -2762,7 +2813,7 @@ unsafe fn __msa_ceqi_b(a: i8x16, imm_s5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.h, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ceqi_h(a: i16x8, imm_s5: i32) -> i16x8 { +unsafe fn __msa_ceqi_h(a: v8i16, imm_s5: i32) -> v8i16 { macro_rules! 
call { ($imm_s5:expr) => { msa_ceqi_h(a, $imm_s5) @@ -2781,7 +2832,7 @@ unsafe fn __msa_ceqi_h(a: i16x8, imm_s5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.w, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ceqi_w(a: i32x4, imm_s5: i32) -> i32x4 { +unsafe fn __msa_ceqi_w(a: v4i32, imm_s5: i32) -> v4i32 { macro_rules! call { ($imm_s5:expr) => { msa_ceqi_w(a, $imm_s5) @@ -2800,7 +2851,7 @@ unsafe fn __msa_ceqi_w(a: i32x4, imm_s5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.d, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ceqi_d(a: i64x2, imm_s5: i32) -> i64x2 { +unsafe fn __msa_ceqi_d(a: v2i64, imm_s5: i32) -> v2i64 { macro_rules! call { ($imm_s5:expr) => { msa_ceqi_d(a, $imm_s5) @@ -2837,8 +2888,8 @@ unsafe fn __msa_cfcmsa(imm5: i32) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.b))] -unsafe fn __msa_cle_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_cle_s_b(a, b) +unsafe fn __msa_cle_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_cle_s_b(a, ::mem::transmute(b)) } /// Vector Compare Signed Less Than or Equal @@ -2851,8 +2902,8 @@ unsafe fn __msa_cle_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.h))] -unsafe fn __msa_cle_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_cle_s_h(a, b) +unsafe fn __msa_cle_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_cle_s_h(a, ::mem::transmute(b)) } /// Vector Compare Signed Less Than or Equal @@ -2865,8 +2916,8 @@ unsafe fn __msa_cle_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.w))] -unsafe fn __msa_cle_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_cle_s_w(a, b) +unsafe fn __msa_cle_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_cle_s_w(a, ::mem::transmute(b)) } /// Vector Compare Signed Less Than or Equal @@ -2879,8 +2930,8 @@ unsafe fn __msa_cle_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.d))] -unsafe fn __msa_cle_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_cle_s_d(a, b) +unsafe fn __msa_cle_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_cle_s_d(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than or Equal @@ -2893,8 +2944,8 @@ unsafe fn __msa_cle_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.b))] -unsafe fn __msa_cle_u_b(a: u8x16, b: u8x16) -> i8x16 { - msa_cle_u_b(a, b) +unsafe fn __msa_cle_u_b(a: v16u8, b: v16u8) -> v16i8 { + msa_cle_u_b(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than or Equal @@ -2907,8 +2958,8 @@ unsafe fn __msa_cle_u_b(a: u8x16, b: u8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.h))] -unsafe fn __msa_cle_u_h(a: u16x8, b: u16x8) -> i16x8 { - msa_cle_u_h(a, b) +unsafe fn __msa_cle_u_h(a: v8u16, b: v8u16) -> v8i16 { + msa_cle_u_h(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than or Equal @@ -2921,8 +2972,8 @@ unsafe fn __msa_cle_u_h(a: u16x8, b: u16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.w))] -unsafe fn __msa_cle_u_w(a: u32x4, b: u32x4) -> i32x4 { - msa_cle_u_w(a, b) +unsafe fn __msa_cle_u_w(a: v4u32, b: v4u32) -> v4i32 { + msa_cle_u_w(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than or Equal @@ -2935,8 +2986,8 @@ unsafe fn __msa_cle_u_w(a: u32x4, b: u32x4) -> i32x4 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.d))] -unsafe fn __msa_cle_u_d(a: u64x2, b: u64x2) -> i64x2 { - msa_cle_u_d(a, b) +unsafe fn __msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64 { + msa_cle_u_d(a, ::mem::transmute(b)) } /// Immediate Compare Signed Less Than or Equal @@ -2950,7 +3001,7 @@ unsafe fn __msa_cle_u_d(a: u64x2, b: u64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.b, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_s_b(a: i8x16, imm_s5: i32) -> i8x16 { +unsafe fn __msa_clei_s_b(a: v16i8, imm_s5: i32) -> v16i8 { macro_rules! call { ($imm_s5:expr) => { msa_clei_s_b(a, $imm_s5) @@ -2970,7 +3021,7 @@ unsafe fn __msa_clei_s_b(a: i8x16, imm_s5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.h, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_s_h(a: i16x8, imm_s5: i32) -> i16x8 { +unsafe fn __msa_clei_s_h(a: v8i16, imm_s5: i32) -> v8i16 { macro_rules! call { ($imm_s5:expr) => { msa_clei_s_h(a, $imm_s5) @@ -2990,7 +3041,7 @@ unsafe fn __msa_clei_s_h(a: i16x8, imm_s5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.w, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_s_w(a: i32x4, imm_s5: i32) -> i32x4 { +unsafe fn __msa_clei_s_w(a: v4i32, imm_s5: i32) -> v4i32 { macro_rules! call { ($imm_s5:expr) => { msa_clei_s_w(a, $imm_s5) @@ -3010,7 +3061,7 @@ unsafe fn __msa_clei_s_w(a: i32x4, imm_s5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.d, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_s_d(a: i64x2, imm_s5: i32) -> i64x2 { +unsafe fn __msa_clei_s_d(a: v2i64, imm_s5: i32) -> v2i64 { macro_rules! call { ($imm_s5:expr) => { msa_clei_s_d(a, $imm_s5) @@ -3030,7 +3081,7 @@ unsafe fn __msa_clei_s_d(a: i64x2, imm_s5: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.b, imm5 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_u_b(a: u8x16, imm5: i32) -> i8x16 { +unsafe fn __msa_clei_u_b(a: v16u8, imm5: i32) -> v16i8 { macro_rules! call { ($imm5:expr) => { msa_clei_u_b(a, $imm5) @@ -3050,7 +3101,7 @@ unsafe fn __msa_clei_u_b(a: u8x16, imm5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.h, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_u_h(a: u16x8, imm5: i32) -> i16x8 { +unsafe fn __msa_clei_u_h(a: v8u16, imm5: i32) -> v8i16 { macro_rules! call { ($imm5:expr) => { msa_clei_u_h(a, $imm5) @@ -3070,7 +3121,7 @@ unsafe fn __msa_clei_u_h(a: u16x8, imm5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_u_w(a: u32x4, imm5: i32) -> i32x4 { +unsafe fn __msa_clei_u_w(a: v4u32, imm5: i32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_clei_u_w(a, $imm5) @@ -3090,7 +3141,7 @@ unsafe fn __msa_clei_u_w(a: u32x4, imm5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.d, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clei_u_d(a: u64x2, imm5: i32) -> i64x2 { +unsafe fn __msa_clei_u_d(a: v2u64, imm5: i32) -> v2i64 { macro_rules! 
call { ($imm5:expr) => { msa_clei_u_d(a, $imm5) @@ -3109,8 +3160,8 @@ unsafe fn __msa_clei_u_d(a: u64x2, imm5: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.b))] -unsafe fn __msa_clt_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_clt_s_b(a, b) +unsafe fn __msa_clt_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_clt_s_b(a, ::mem::transmute(b)) } /// Vector Compare Signed Less Than @@ -3123,8 +3174,8 @@ unsafe fn __msa_clt_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.h))] -unsafe fn __msa_clt_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_clt_s_h(a, b) +unsafe fn __msa_clt_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_clt_s_h(a, ::mem::transmute(b)) } /// Vector Compare Signed Less Than @@ -3137,8 +3188,8 @@ unsafe fn __msa_clt_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.w))] -unsafe fn __msa_clt_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_clt_s_w(a, b) +unsafe fn __msa_clt_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_clt_s_w(a, ::mem::transmute(b)) } /// Vector Compare Signed Less Than @@ -3151,8 +3202,8 @@ unsafe fn __msa_clt_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.d))] -unsafe fn __msa_clt_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_clt_s_d(a, b) +unsafe fn __msa_clt_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_clt_s_d(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than @@ -3165,8 +3216,8 @@ unsafe fn __msa_clt_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.b))] -unsafe fn __msa_clt_u_b(a: u8x16, b: u8x16) -> i8x16 { - msa_clt_u_b(a, b) +unsafe fn __msa_clt_u_b(a: v16u8, b: v16u8) -> v16i8 { + msa_clt_u_b(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than @@ -3179,8 +3230,8 @@ unsafe fn __msa_clt_u_b(a: u8x16, b: u8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.h))] -unsafe fn __msa_clt_u_h(a: u16x8, b: u16x8) -> i16x8 { - msa_clt_u_h(a, b) +unsafe fn __msa_clt_u_h(a: v8u16, b: v8u16) -> v8i16 { + msa_clt_u_h(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than @@ -3193,8 +3244,8 @@ unsafe fn __msa_clt_u_h(a: u16x8, b: u16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.w))] -unsafe fn __msa_clt_u_w(a: u32x4, b: u32x4) -> i32x4 { - msa_clt_u_w(a, b) +unsafe fn __msa_clt_u_w(a: v4u32, b: v4u32) -> v4i32 { + msa_clt_u_w(a, ::mem::transmute(b)) } /// Vector Compare Unsigned Less Than @@ -3207,8 +3258,8 @@ unsafe fn __msa_clt_u_w(a: u32x4, b: u32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.d))] -unsafe fn __msa_clt_u_d(a: u64x2, b: u64x2) -> i64x2 { - msa_clt_u_d(a, b) +unsafe fn __msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64 { + msa_clt_u_d(a, ::mem::transmute(b)) } /// Immediate Compare Signed Less Than @@ -3222,7 +3273,7 @@ unsafe fn __msa_clt_u_d(a: u64x2, b: u64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.b, imm_s5 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_s_b(a: i8x16, imm_s5: i32) -> i8x16 { +unsafe fn __msa_clti_s_b(a: v16i8, imm_s5: i32) -> v16i8 { macro_rules! 
call { ($imm_s5:expr) => { msa_clti_s_b(a, $imm_s5) @@ -3242,7 +3293,7 @@ unsafe fn __msa_clti_s_b(a: i8x16, imm_s5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.h, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_s_h(a: i16x8, imm_s5: i32) -> i16x8 { +unsafe fn __msa_clti_s_h(a: v8i16, imm_s5: i32) -> v8i16 { macro_rules! call { ($imm_s5:expr) => { msa_clti_s_h(a, $imm_s5) @@ -3262,7 +3313,7 @@ unsafe fn __msa_clti_s_h(a: i16x8, imm_s5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.w, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_s_w(a: i32x4, imm_s5: i32) -> i32x4 { +unsafe fn __msa_clti_s_w(a: v4i32, imm_s5: i32) -> v4i32 { macro_rules! call { ($imm_s5:expr) => { msa_clti_s_w(a, $imm_s5) @@ -3282,7 +3333,7 @@ unsafe fn __msa_clti_s_w(a: i32x4, imm_s5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.d, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_s_d(a: i64x2, imm_s5: i32) -> i64x2 { +unsafe fn __msa_clti_s_d(a: v2i64, imm_s5: i32) -> v2i64 { macro_rules! call { ($imm_s5:expr) => { msa_clti_s_d(a, $imm_s5) @@ -3302,7 +3353,7 @@ unsafe fn __msa_clti_s_d(a: i64x2, imm_s5: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.b, imm5 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_u_b(a: u8x16, imm5: i32) -> i8x16 { +unsafe fn __msa_clti_u_b(a: v16u8, imm5: i32) -> v16i8 { macro_rules! call { ($imm5:expr) => { msa_clti_u_b(a, $imm5) @@ -3322,7 +3373,7 @@ unsafe fn __msa_clti_u_b(a: u8x16, imm5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.h, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_u_h(a: u16x8, imm5: i32) -> i16x8 { +unsafe fn __msa_clti_u_h(a: v8u16, imm5: i32) -> v8i16 { macro_rules! call { ($imm5:expr) => { msa_clti_u_h(a, $imm5) @@ -3342,7 +3393,7 @@ unsafe fn __msa_clti_u_h(a: u16x8, imm5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_u_w(a: u32x4, imm5: i32) -> i32x4 { +unsafe fn __msa_clti_u_w(a: v4u32, imm5: i32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_clti_u_w(a, $imm5) @@ -3362,7 +3413,7 @@ unsafe fn __msa_clti_u_w(a: u32x4, imm5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.d, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_clti_u_d(a: u64x2, imm5: i32) -> i64x2 { +unsafe fn __msa_clti_u_d(a: v2u64, imm5: i32) -> v2i64 { macro_rules! call { ($imm5:expr) => { msa_clti_u_d(a, $imm5) @@ -3380,7 +3431,7 @@ unsafe fn __msa_clti_u_d(a: u64x2, imm5: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_s_b(a: i8x16, imm4: i32) -> i32 { +unsafe fn __msa_copy_s_b(a: v16i8, imm4: i32) -> i32 { macro_rules! call { ($imm4:expr) => { msa_copy_s_b(a, $imm4) @@ -3398,7 +3449,7 @@ unsafe fn __msa_copy_s_b(a: i8x16, imm4: i32) -> i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_s_h(a: i16x8, imm3: i32) -> i32 { +unsafe fn __msa_copy_s_h(a: v8i16, imm3: i32) -> i32 { macro_rules! 
call { ($imm3:expr) => { msa_copy_s_h(a, $imm3) @@ -3416,7 +3467,7 @@ unsafe fn __msa_copy_s_h(a: i16x8, imm3: i32) -> i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_s_w(a: i32x4, imm2: i32) -> i32 { +unsafe fn __msa_copy_s_w(a: v4i32, imm2: i32) -> i32 { macro_rules! call { ($imm2:expr) => { msa_copy_s_w(a, $imm2) @@ -3434,7 +3485,7 @@ unsafe fn __msa_copy_s_w(a: i32x4, imm2: i32) -> i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_s_d(a: i64x2, imm1: i32) -> i64 { +unsafe fn __msa_copy_s_d(a: v2i64, imm1: i32) -> i64 { macro_rules! call { ($imm1:expr) => { msa_copy_s_d(a, $imm1) @@ -3452,7 +3503,7 @@ unsafe fn __msa_copy_s_d(a: i64x2, imm1: i32) -> i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_u_b(a: i8x16, imm4: i32) -> u32 { +unsafe fn __msa_copy_u_b(a: v16i8, imm4: i32) -> u32 { macro_rules! call { ($imm4:expr) => { msa_copy_u_b(a, $imm4) @@ -3470,7 +3521,7 @@ unsafe fn __msa_copy_u_b(a: i8x16, imm4: i32) -> u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_u_h(a: i16x8, imm3: i32) -> u32 { +unsafe fn __msa_copy_u_h(a: v8i16, imm3: i32) -> u32 { macro_rules! call { ($imm3:expr) => { msa_copy_u_h(a, $imm3) @@ -3488,7 +3539,7 @@ unsafe fn __msa_copy_u_h(a: i16x8, imm3: i32) -> u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_u_w(a: i32x4, imm2: i32) -> u32 { +unsafe fn __msa_copy_u_w(a: v4i32, imm2: i32) -> u32 { macro_rules! call { ($imm2:expr) => { msa_copy_u_w(a, $imm2) @@ -3506,7 +3557,7 @@ unsafe fn __msa_copy_u_w(a: i32x4, imm2: i32) -> u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_copy_u_d(a: i64x2, imm1: i32) -> u64 { +unsafe fn __msa_copy_u_d(a: v2i64, imm1: i32) -> u64 { macro_rules! 
call { ($imm1:expr) => { msa_copy_u_d(a, $imm1) @@ -3541,8 +3592,8 @@ unsafe fn __msa_ctcmsa(imm5: i32, a: i32) -> () { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.b))] -unsafe fn __msa_div_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_div_s_b(a, b) +unsafe fn __msa_div_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_div_s_b(a, ::mem::transmute(b)) } /// Vector Signed Divide @@ -3554,8 +3605,8 @@ unsafe fn __msa_div_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.h))] -unsafe fn __msa_div_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_div_s_h(a, b) +unsafe fn __msa_div_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_div_s_h(a, ::mem::transmute(b)) } /// Vector Signed Divide @@ -3567,8 +3618,8 @@ unsafe fn __msa_div_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.w))] -unsafe fn __msa_div_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_div_s_w(a, b) +unsafe fn __msa_div_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_div_s_w(a, ::mem::transmute(b)) } /// Vector Signed Divide @@ -3580,8 +3631,8 @@ unsafe fn __msa_div_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.d))] -unsafe fn __msa_div_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_div_s_d(a, b) +unsafe fn __msa_div_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_div_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Divide @@ -3593,8 +3644,8 @@ unsafe fn __msa_div_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.b))] -unsafe fn __msa_div_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_div_u_b(a, b) +unsafe fn __msa_div_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_div_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Divide @@ -3606,8 +3657,8 @@ unsafe fn __msa_div_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.h))] -unsafe fn __msa_div_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_div_u_h(a, b) +unsafe fn __msa_div_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_div_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Divide @@ -3619,8 +3670,8 @@ unsafe fn __msa_div_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.w))] -unsafe fn __msa_div_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_div_u_w(a, b) +unsafe fn __msa_div_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_div_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Divide @@ -3632,8 +3683,8 @@ unsafe fn __msa_div_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.d))] -unsafe fn __msa_div_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_div_u_d(a, b) +unsafe fn __msa_div_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_div_u_d(a, ::mem::transmute(b)) } /// Vector Signed Dot Product @@ -3647,8 +3698,8 @@ unsafe fn __msa_div_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_s.h))] -unsafe fn __msa_dotp_s_h(a: i8x16, b: i8x16) -> i16x8 { - msa_dotp_s_h(a, b) +unsafe fn __msa_dotp_s_h(a: v16i8, b: v16i8) -> v8i16 { + msa_dotp_s_h(a, ::mem::transmute(b)) } /// Vector Signed Dot Product @@ -3662,8 +3713,8 @@ unsafe fn __msa_dotp_s_h(a: i8x16, b: i8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_s.w))] -unsafe fn __msa_dotp_s_w(a: i16x8, b: i16x8) -> i32x4 { - msa_dotp_s_w(a, b) +unsafe fn 
__msa_dotp_s_w(a: v8i16, b: v8i16) -> v4i32 { + msa_dotp_s_w(a, ::mem::transmute(b)) } /// Vector Signed Dot Product @@ -3677,8 +3728,8 @@ unsafe fn __msa_dotp_s_w(a: i16x8, b: i16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_s.d))] -unsafe fn __msa_dotp_s_d(a: i32x4, b: i32x4) -> i64x2 { - msa_dotp_s_d(a, b) +unsafe fn __msa_dotp_s_d(a: v4i32, b: v4i32) -> v2i64 { + msa_dotp_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Dot Product @@ -3692,8 +3743,8 @@ unsafe fn __msa_dotp_s_d(a: i32x4, b: i32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_u.h))] -unsafe fn __msa_dotp_u_h(a: u8x16, b: u8x16) -> u16x8 { - msa_dotp_u_h(a, b) +unsafe fn __msa_dotp_u_h(a: v16u8, b: v16u8) -> v8u16 { + msa_dotp_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Dot Product @@ -3707,8 +3758,8 @@ unsafe fn __msa_dotp_u_h(a: u8x16, b: u8x16) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_u.w))] -unsafe fn __msa_dotp_u_w(a: u16x8, b: u16x8) -> u32x4 { - msa_dotp_u_w(a, b) +unsafe fn __msa_dotp_u_w(a: v8u16, b: v8u16) -> v4u32 { + msa_dotp_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Dot Product @@ -3722,8 +3773,8 @@ unsafe fn __msa_dotp_u_w(a: u16x8, b: u16x8) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_u.d))] -unsafe fn __msa_dotp_u_d(a: u32x4, b: u32x4) -> u64x2 { - msa_dotp_u_d(a, b) +unsafe fn __msa_dotp_u_d(a: v4u32, b: v4u32) -> v2u64 { + msa_dotp_u_d(a, ::mem::transmute(b)) } /// Vector Signed Dot Product and Add @@ -3736,8 +3787,8 @@ unsafe fn __msa_dotp_u_d(a: u32x4, b: u32x4) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_s.h))] -unsafe fn __msa_dpadd_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8 { - msa_dpadd_s_h(a, b, c) +unsafe fn __msa_dpadd_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { + msa_dpadd_s_h(a, ::mem::transmute(b), c) } /// Vector Signed Dot Product and Add @@ -3750,8 +3801,8 @@ unsafe fn __msa_dpadd_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_s.w))] -unsafe fn __msa_dpadd_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4 { - msa_dpadd_s_w(a, b, c) +unsafe fn __msa_dpadd_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { + msa_dpadd_s_w(a, ::mem::transmute(b), c) } /// Vector Signed Dot Product and Add @@ -3764,8 +3815,8 @@ unsafe fn __msa_dpadd_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_s.d))] -unsafe fn __msa_dpadd_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2 { - msa_dpadd_s_d(a, b, c) +unsafe fn __msa_dpadd_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { + msa_dpadd_s_d(a, ::mem::transmute(b), c) } /// Vector Unsigned Dot Product and Add @@ -3778,8 +3829,8 @@ unsafe fn __msa_dpadd_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_u.h))] -unsafe fn __msa_dpadd_u_h(a: u16x8, b: u8x16, c: u8x16) -> u16x8 { - msa_dpadd_u_h(a, b, c) +unsafe fn __msa_dpadd_u_h(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { + msa_dpadd_u_h(a, ::mem::transmute(b), c) } /// Vector Unsigned Dot Product and Add @@ -3792,8 +3843,8 @@ unsafe fn __msa_dpadd_u_h(a: u16x8, b: u8x16, c: u8x16) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_u.w))] -unsafe fn __msa_dpadd_u_w(a: u32x4, b: u16x8, c: u16x8) -> u32x4 { - msa_dpadd_u_w(a, b, c) 
+unsafe fn __msa_dpadd_u_w(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { + msa_dpadd_u_w(a, ::mem::transmute(b), c) } /// Vector Unsigned Dot Product and Add @@ -3806,8 +3857,8 @@ unsafe fn __msa_dpadd_u_w(a: u32x4, b: u16x8, c: u16x8) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_u.d))] -unsafe fn __msa_dpadd_u_d(a: u64x2, b: u32x4, c: u32x4) -> u64x2 { - msa_dpadd_u_d(a, b, c) +unsafe fn __msa_dpadd_u_d(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { + msa_dpadd_u_d(a, ::mem::transmute(b), c) } /// Vector Signed Dot Product and Subtract @@ -3821,8 +3872,8 @@ unsafe fn __msa_dpadd_u_d(a: u64x2, b: u32x4, c: u32x4) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_s.h))] -unsafe fn __msa_dpsub_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8 { - msa_dpsub_s_h(a, b, c) +unsafe fn __msa_dpsub_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { + msa_dpsub_s_h(a, ::mem::transmute(b), c) } /// Vector Signed Dot Product and Subtract @@ -3836,8 +3887,8 @@ unsafe fn __msa_dpsub_s_h(a: i16x8, b: i8x16, c: i8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_s.w))] -unsafe fn __msa_dpsub_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4 { - msa_dpsub_s_w(a, b, c) +unsafe fn __msa_dpsub_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { + msa_dpsub_s_w(a, ::mem::transmute(b), c) } /// Vector Signed Dot Product and Subtract @@ -3851,8 +3902,8 @@ unsafe fn __msa_dpsub_s_w(a: i32x4, b: i16x8, c: i16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_s.d))] -unsafe fn __msa_dpsub_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2 { - msa_dpsub_s_d(a, b, c) +unsafe fn __msa_dpsub_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { + msa_dpsub_s_d(a, ::mem::transmute(b), c) } /// Vector Unsigned Dot Product and Subtract @@ -3866,8 +3917,8 @@ unsafe fn __msa_dpsub_s_d(a: i64x2, b: i32x4, c: i32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_u.h))] -unsafe fn __msa_dpsub_u_h(a: i16x8, b: u8x16, c: u8x16) -> i16x8 { - msa_dpsub_u_h(a, b, c) +unsafe fn __msa_dpsub_u_h(a: v8i16, b: v16u8, c: v16u8) -> v8i16 { + msa_dpsub_u_h(a, ::mem::transmute(b), c) } /// Vector Unsigned Dot Product and Subtract @@ -3881,8 +3932,8 @@ unsafe fn __msa_dpsub_u_h(a: i16x8, b: u8x16, c: u8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_u.w))] -unsafe fn __msa_dpsub_u_w(a: i32x4, b: u16x8, c: u16x8) -> i32x4 { - msa_dpsub_u_w(a, b, c) +unsafe fn __msa_dpsub_u_w(a: v4i32, b: v8u16, c: v8u16) -> v4i32 { + msa_dpsub_u_w(a, ::mem::transmute(b), c) } /// Vector Unsigned Dot Product and Subtract @@ -3896,8 +3947,8 @@ unsafe fn __msa_dpsub_u_w(a: i32x4, b: u16x8, c: u16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_u.d))] -unsafe fn __msa_dpsub_u_d(a: i64x2, b: u32x4, c: u32x4) -> i64x2 { - msa_dpsub_u_d(a, b, c) +unsafe fn __msa_dpsub_u_d(a: v2i64, b: v4u32, c: v4u32) -> v2i64 { + msa_dpsub_u_d(a, ::mem::transmute(b), c) } /// Vector Floating-Point Addition @@ -3909,8 +3960,8 @@ unsafe fn __msa_dpsub_u_d(a: i64x2, b: u32x4, c: u32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fadd.w))] -unsafe fn __msa_fadd_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fadd_w(a, b) +unsafe fn __msa_fadd_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fadd_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Addition @@ -3922,21 +3973,21 @@ 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fadd.d))] -unsafe fn __msa_fadd_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fadd_d(a, b) +unsafe fn __msa_fadd_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fadd_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Always False /// /// Set all bits to 0 in vector (four signed 32-bit integer numbers) /// Signaling NaN elements in 'a' (four 32-bit floating point numbers) -/// or 'b' (four 32-bit floating point numbers) signal Invalid Operation exception. +/// or 'b' (four 32-bit floating point numbers) signal Invalid Operation exception. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcaf.w))] -unsafe fn __msa_fcaf_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcaf_w(a, b) +unsafe fn __msa_fcaf_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcaf_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Always False @@ -3948,22 +3999,22 @@ #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcaf.d))] -unsafe fn __msa_fcaf_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcaf_d(a, b) +unsafe fn __msa_fcaf_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcaf_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) /// elements if the corresponding 'a' (four 32-bit floating point numbers) -/// and 'b' (four 32-bit floating point numbers) elements are ordered and equal, +/// and 'b' (four 32-bit floating point numbers) elements are ordered and equal, /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fceq.w))] -unsafe fn __msa_fceq_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fceq_w(a, b) +unsafe fn __msa_fceq_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fceq_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Equal @@ -3976,8 +4027,8 @@ unsafe fn __msa_fceq_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fceq.d))] -unsafe fn __msa_fceq_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fceq_d(a, b) +unsafe fn __msa_fceq_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fceq_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Class Mask @@ -3992,7 +4043,7 @@ unsafe fn __msa_fceq_d(a: f64x2, b: f64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclass.w))] -unsafe fn __msa_fclass_w(a: f32x4) -> i32x4 { +unsafe fn __msa_fclass_w(a: v4f32) -> v4i32 { msa_fclass_w(a) } @@ -4008,22 +4059,22 @@ unsafe fn __msa_fclass_w(a: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclass.d))] -unsafe fn __msa_fclass_d(a: f64x2) -> i64x2 { +unsafe fn __msa_fclass_d(a: v2f64) -> v2i64 { msa_fclass_d(a) } /// Vector Floating-Point Quiet Compare Less or Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) -/// elements if the corresponding 'a' (four 32-bit floating point numbers) elements are ordered -/// and either less than or equal to 'b' (four 32-bit floating point numbers) elements +/// elements if the corresponding 'a' (four 32-bit floating point numbers) elements are ordered +/// and either less than or equal to 'b' (four 32-bit floating point numbers) elements /// otherwise set all bits to 0. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcle.w))] -unsafe fn __msa_fcle_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcle_w(a, b) +unsafe fn __msa_fcle_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcle_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Less or Equal @@ -4036,22 +4087,22 @@ unsafe fn __msa_fcle_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcle.d))] -unsafe fn __msa_fcle_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcle_d(a, b) +unsafe fn __msa_fcle_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcle_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Less Than /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) -/// elements if the corresponding 'a' (four 32-bit floating point numbers) elements are ordered -/// and less than 'b' (four 32-bit floating point numbers) elements +/// elements if the corresponding 'a' (four 32-bit floating point numbers) elements are ordered +/// and less than 'b' (four 32-bit floating point numbers) elements /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclt.w))] -unsafe fn __msa_fclt_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fclt_w(a, b) +unsafe fn __msa_fclt_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fclt_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Less Than @@ -4064,22 +4115,22 @@ unsafe fn __msa_fclt_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclt.d))] -unsafe fn __msa_fclt_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fclt_d(a, b) +unsafe fn __msa_fclt_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fclt_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Not Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) -/// elements if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are ordered and not equal +/// elements if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are ordered and not equal /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcne.w))] -unsafe fn __msa_fcne_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcne_w(a, b) +unsafe fn __msa_fcne_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcne_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Not Equal @@ -4092,22 +4143,22 @@ unsafe fn __msa_fcne_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcne.d))] -unsafe fn __msa_fcne_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcne_d(a, b) +unsafe fn __msa_fcne_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcne_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Ordered /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) -/// elements if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are ordered, i.e. both elementsare not NaN values, +/// elements if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are ordered, i.e. both elements are not NaN values, /// otherwise set all bits to 0. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcor.w))] -unsafe fn __msa_fcor_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcor_w(a, b) +unsafe fn __msa_fcor_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcor_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Ordered @@ -4120,22 +4171,22 @@ unsafe fn __msa_fcor_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcor.d))] -unsafe fn __msa_fcor_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcor_d(a, b) +unsafe fn __msa_fcor_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcor_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) -/// elements if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are unordered or equal, +/// elements if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered or equal, /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcueq.w))] -unsafe fn __msa_fcueq_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcueq_w(a, b) +unsafe fn __msa_fcueq_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcueq_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Equal @@ -4148,22 +4199,22 @@ unsafe fn __msa_fcueq_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcueq.d))] -unsafe fn __msa_fcueq_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcueq_d(a, b) +unsafe fn __msa_fcueq_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcueq_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Less or Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) /// elements if the corresponding elements in 'a' (four 32-bit floating point numbers) -/// are unordered or less than or equal to 'b' (four 32-bit floating point numbers) elements, +/// are unordered or less than or equal to 'b' (four 32-bit floating point numbers) elements, /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcule.w))] -unsafe fn __msa_fcule_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcule_w(a, b) +unsafe fn __msa_fcule_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcule_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Less or Equal @@ -4176,22 +4227,22 @@ unsafe fn __msa_fcule_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcule.d))] -unsafe fn __msa_fcule_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcule_d(a, b) +unsafe fn __msa_fcule_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcule_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Less Than /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) /// elements if the corresponding elements in 'a' (four 32-bit floating point numbers) -/// are unordered or less than 'b' (four 32-bit floating point numbers) elements, +/// are unordered or less than 'b' (four 32-bit floating point numbers) elements, /// otherwise set all bits to 0. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcult.w))] -unsafe fn __msa_fcult_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcult_w(a, b) +unsafe fn __msa_fcult_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcult_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Less Than @@ -4204,22 +4255,22 @@ unsafe fn __msa_fcult_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcult.d))] -unsafe fn __msa_fcult_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcult_d(a, b) +unsafe fn __msa_fcult_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcult_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) /// elements if the corresponding 'a' (four 32-bit floating point numbers) -/// and 'b' (four 32-bit floating point numbers) elements are unordered, +/// and 'b' (four 32-bit floating point numbers) elements are unordered, /// i.e. at least one element is a NaN value, otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcun.w))] -unsafe fn __msa_fcun_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcun_w(a, b) +unsafe fn __msa_fcun_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcun_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered @@ -4232,22 +4283,22 @@ unsafe fn __msa_fcun_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcun.d))] -unsafe fn __msa_fcun_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcun_d(a, b) +unsafe fn __msa_fcun_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcun_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Not Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) /// elements if the corresponding 'a' (four 32-bit floating point numbers) -/// and 'b' (four 32-bit floating point numbers) elements are unordered or not equal, +/// and 'b' (four 32-bit floating point numbers) elements are unordered or not equal, /// otherwise set all bits to 0. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcune.w))] -unsafe fn __msa_fcune_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fcune_w(a, b) +unsafe fn __msa_fcune_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fcune_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Quiet Compare Unordered or Not Equal @@ -4260,8 +4311,8 @@ unsafe fn __msa_fcune_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcune.d))] -unsafe fn __msa_fcune_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fcune_d(a, b) +unsafe fn __msa_fcune_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fcune_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Division @@ -4273,8 +4324,8 @@ unsafe fn __msa_fcune_d(a: f64x2, b: f64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fdiv.w))] -unsafe fn __msa_fdiv_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fdiv_w(a, b) +unsafe fn __msa_fdiv_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fdiv_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Division @@ -4286,8 +4337,8 @@ unsafe fn __msa_fdiv_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fdiv.d))] -unsafe fn __msa_fdiv_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fdiv_d(a, b) +unsafe fn __msa_fdiv_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fdiv_d(a, ::mem::transmute(b)) } /* FIXME: 16-bit float @@ -4301,8 +4352,8 @@ unsafe fn __msa_fdiv_d(a: f64x2, b: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexdo.h))] -unsafe fn __msa_fexdo_h(a: f32x4, b: f32x4) -> f16x8 { - msa_fexdo_h(a, b) +unsafe fn __msa_fexdo_h(a: v4f32, b: v4f32) -> f16x8 { + msa_fexdo_h(a, ::mem::transmute(b)) }*/ /// Vector Floating-Point Down-Convert Interchange Format @@ -4315,8 +4366,8 @@ #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexdo.w))] -unsafe fn __msa_fexdo_w(a: f64x2, b: f64x2) -> f32x4 { - msa_fexdo_w(a, b) +unsafe fn __msa_fexdo_w(a: v2f64, b: v2f64) -> v4f32 { + msa_fexdo_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Base 2 Exponentiation @@ -4329,8 +4380,8 @@ unsafe fn __msa_fexdo_w(a: f64x2, b: f64x2) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexp2.w))] -unsafe fn __msa_fexp2_w(a: f32x4, b: i32x4) -> f32x4 { - msa_fexp2_w(a, b) +unsafe fn __msa_fexp2_w(a: v4f32, b: v4i32) -> v4f32 { + msa_fexp2_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Base 2 Exponentiation @@ -4343,8 +4394,8 @@ unsafe fn __msa_fexp2_w(a: f32x4, b: i32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexp2.d))] -unsafe fn __msa_fexp2_d(a: f64x2, b: i64x2) -> f64x2 { - msa_fexp2_d(a, b) +unsafe fn __msa_fexp2_d(a: v2f64, b: v2i64) -> v2f64 { + msa_fexp2_d(a, ::mem::transmute(b)) } /* FIXME: 16-bit float @@ -4358,7 +4409,7 @@ unsafe fn __msa_fexp2_d(a: f64x2, b: i64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupl.w))] -unsafe fn __msa_fexupl_w(a: f16x8) -> f32x4 { +unsafe fn __msa_fexupl_w(a: f16x8) -> v4f32 { msa_fexupl_w(a) }*/ /// Vector Floating-Point Up-Convert Interchange Format Left @@ -4372,7 +4423,7 @@ unsafe fn __msa_fexupl_w(a: f16x8) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupl.d))] -unsafe fn __msa_fexupl_d(a: f32x4) -> f64x2 { +unsafe fn __msa_fexupl_d(a: v4f32) -> v2f64 { msa_fexupl_d(a) } @@ -4387,7 +4438,7 @@ unsafe fn __msa_fexupl_d(a: f32x4) -> f64x2 { 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupr.w))] -unsafe fn __msa_fexupr_w(a: f16x8) -> f32x4 { +unsafe fn __msa_fexupr_w(a: f16x8) -> v4f32 { msa_fexupr_w(a) } */ @@ -4401,7 +4452,7 @@ unsafe fn __msa_fexupr_w(a: f16x8) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupr.d))] -unsafe fn __msa_fexupr_d(a: f32x4) -> f64x2 { +unsafe fn __msa_fexupr_d(a: v4f32) -> v2f64 { msa_fexupr_d(a) } @@ -4414,7 +4465,7 @@ unsafe fn __msa_fexupr_d(a: f32x4) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_s.w))] -unsafe fn __msa_ffint_s_w(a: i32x4) -> f32x4 { +unsafe fn __msa_ffint_s_w(a: v4i32) -> v4f32 { msa_ffint_s_w(a) } @@ -4427,7 +4478,7 @@ unsafe fn __msa_ffint_s_w(a: i32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_s.d))] -unsafe fn __msa_ffint_s_d(a: i64x2) -> f64x2 { +unsafe fn __msa_ffint_s_d(a: v2i64) -> v2f64 { msa_ffint_s_d(a) } @@ -4440,7 +4491,7 @@ unsafe fn __msa_ffint_s_d(a: i64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_u.w))] -unsafe fn __msa_ffint_u_w(a: u32x4) -> f32x4 { +unsafe fn __msa_ffint_u_w(a: v4u32) -> v4f32 { msa_ffint_u_w(a) } @@ -4453,7 +4504,7 @@ unsafe fn __msa_ffint_u_w(a: u32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_u.d))] -unsafe fn __msa_ffint_u_d(a: u64x2) -> f64x2 { +unsafe fn __msa_ffint_u_d(a: v2u64) -> v2f64 { msa_ffint_u_d(a) } @@ -4467,7 +4518,7 @@ unsafe fn __msa_ffint_u_d(a: u64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffql.w))] -unsafe fn __msa_ffql_w(a: i16x8) -> f32x4 { +unsafe fn __msa_ffql_w(a: v8i16) -> v4f32 { msa_ffql_w(a) } @@ -4481,7 +4532,7 @@ unsafe fn __msa_ffql_w(a: i16x8) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffql.d))] -unsafe fn __msa_ffql_d(a: i32x4) -> f64x2 { +unsafe fn __msa_ffql_d(a: v4i32) -> v2f64 { msa_ffql_d(a) } @@ -4495,7 +4546,7 @@ unsafe fn __msa_ffql_d(a: i32x4) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffqr.w))] -unsafe fn __msa_ffqr_w(a: i16x8) -> f32x4 { +unsafe fn __msa_ffqr_w(a: v8i16) -> v4f32 { msa_ffqr_w(a) } @@ -4509,7 +4560,7 @@ unsafe fn __msa_ffqr_w(a: i16x8) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffqr.d))] -unsafe fn __msa_ffqr_d(a: i32x4) -> f64x2 { +unsafe fn __msa_ffqr_d(a: v4i32) -> v2f64 { msa_ffqr_d(a) } @@ -4522,7 +4573,7 @@ unsafe fn __msa_ffqr_d(a: i32x4) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.b))] -unsafe fn __msa_fill_b(a: i32) -> i8x16 { +unsafe fn __msa_fill_b(a: i32) -> v16i8 { msa_fill_b(a) } @@ -4535,7 +4586,7 @@ unsafe fn __msa_fill_b(a: i32) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.h))] -unsafe fn __msa_fill_h(a: i32) -> i16x8 { +unsafe fn __msa_fill_h(a: i32) -> v8i16 { msa_fill_h(a) } @@ -4548,7 +4599,7 @@ unsafe fn __msa_fill_h(a: i32) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.w))] -unsafe fn __msa_fill_w(a: i32) -> i32x4 { +unsafe fn __msa_fill_w(a: i32) -> v4i32 { msa_fill_w(a) } @@ -4561,20 +4612,20 @@ unsafe fn __msa_fill_w(a: i32) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.d))] -unsafe fn __msa_fill_d(a: i64) -> i64x2 { +unsafe fn __msa_fill_d(a: i64) 
-> v2i64 { msa_fill_d(a) } /// Vector Floating-Point Base 2 Logarithm /// /// The signed integral base 2 exponents of floating-point elements in vector 'a' -/// (four 32-bit floating point numbers) are written as floating-point values to vector elements +/// (four 32-bit floating point numbers) are written as floating-point values to vector elements /// (four 32-bit floating point numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(flog2.w))] -unsafe fn __msa_flog2_w(a: f32x4) -> f32x4 { +unsafe fn __msa_flog2_w(a: v4f32) -> v4f32 { msa_flog2_w(a) } @@ -4587,7 +4638,7 @@ unsafe fn __msa_flog2_w(a: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(flog2.d))] -unsafe fn __msa_flog2_d(a: f64x2) -> f64x2 { +unsafe fn __msa_flog2_d(a: v2f64) -> v2f64 { msa_flog2_d(a) } @@ -4600,8 +4651,8 @@ unsafe fn __msa_flog2_d(a: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmadd.w))] -unsafe fn __msa_fmadd_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4 { - msa_fmadd_w(a, b, c) +unsafe fn __msa_fmadd_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + msa_fmadd_w(a, ::mem::transmute(b), c) } /// Vector Floating-Point Multiply-Add @@ -4613,21 +4664,21 @@ unsafe fn __msa_fmadd_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmadd.d))] -unsafe fn __msa_fmadd_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2 { - msa_fmadd_d(a, b, c) +unsafe fn __msa_fmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + msa_fmadd_d(a, ::mem::transmute(b), c) } /// Vector Floating-Point Maximum /// /// The largest values between corresponding floating-point elements in vector 'a' -/// (four 32-bit floating point numbers) andvector 'b' (four 32-bit floating point numbers) +/// (four 32-bit floating point numbers) and vector 'b' (four 32-bit floating point numbers) /// are written to vector (four 32-bit floating point numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax.w))] -unsafe fn __msa_fmax_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fmax_w(a, b) +unsafe fn __msa_fmax_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fmax_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Maximum @@ -4639,8 +4690,8 @@ unsafe fn __msa_fmax_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax.d))] -unsafe fn __msa_fmax_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fmax_d(a, b) +unsafe fn __msa_fmax_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fmax_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Maximum Based on Absolute Values @@ -4653,8 +4704,8 @@ unsafe fn __msa_fmax_d(a: f64x2, b: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax_a.w))] -unsafe fn __msa_fmax_a_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fmax_a_w(a, b) +unsafe fn __msa_fmax_a_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fmax_a_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Maximum Based on Absolute Values @@ -4667,21 +4718,21 @@ unsafe fn __msa_fmax_a_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax_a.d))] -unsafe fn __msa_fmax_a_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fmax_a_d(a, b) +unsafe fn __msa_fmax_a_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fmax_a_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Minimum /// /// The smallest values between corresponding floating-point elements in vector 'a' -/// (four 32-bit floating point
numbers) andvector 'b' (four 32-bit floating point numbers) +/// (four 32-bit floating point numbers) and vector 'b' (four 32-bit floating point numbers) /// are written to vector (four 32-bit floating point numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin.w))] -unsafe fn __msa_fmin_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fmin_w(a, b) +unsafe fn __msa_fmin_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fmin_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Minimum @@ -4693,8 +4744,8 @@ unsafe fn __msa_fmin_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin.d))] -unsafe fn __msa_fmin_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fmin_d(a, b) +unsafe fn __msa_fmin_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fmin_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Minimum Based on Absolute Values @@ -4707,8 +4758,8 @@ unsafe fn __msa_fmin_d(a: f64x2, b: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin_a.w))] -unsafe fn __msa_fmin_a_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fmin_a_w(a, b) +unsafe fn __msa_fmin_a_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fmin_a_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Minimum Based on Absolute Values @@ -4721,47 +4772,46 @@ unsafe fn __msa_fmin_a_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin_a.d))] -unsafe fn __msa_fmin_a_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fmin_a_d(a, b) +unsafe fn __msa_fmin_a_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fmin_a_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Multiply-Sub /// -/// The floating-point elements in vector 'b' (four 32-bit floating point numbers) +/// The floating-point elements in vector 'b' (four 32-bit floating point numbers) /// multiplied by floating-point elements in vector 'c' (four 32-bit floating point numbers) /// are subtracted from the floating-point elements in vector 'a' (four 32-bit floating point numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmsub.w))] -unsafe fn __msa_fmsub_w(a: f32x4, b: f32x4, c: f32x4) -> f32x4 { - msa_fmsub_w(a, b, c) +unsafe fn __msa_fmsub_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + msa_fmsub_w(a, ::mem::transmute(b), c) } /// Vector Floating-Point Multiply-Sub /// -/// The floating-point elements in vector 'b' (two 64-bit floating point numbers) +/// The floating-point elements in vector 'b' (two 64-bit floating point numbers) /// multiplied by floating-point elements in vector 'c' (two 64-bit floating point numbers) /// are subtracted from the floating-point elements in vector 'a' (two 64-bit floating point numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmsub.d))] -unsafe fn __msa_fmsub_d(a: f64x2, b: f64x2, c: f64x2) -> f64x2 { - msa_fmsub_d(a, b, c) +unsafe fn __msa_fmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + msa_fmsub_d(a, ::mem::transmute(b), c) } - /// Vector Floating-Point Multiplication /// -/// The floating-point elements in vector 'a' (four 32-bit floating point numbers) are +/// The floating-point elements in vector 'a' (four 32-bit floating point numbers) are /// multiplied by floating-point elements in vector 'b' (four 32-bit floating point numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmul.w))] -unsafe fn __msa_fmul_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fmul_w(a, b) +unsafe fn __msa_fmul_w(a: v4f32, b: v4f32) -> v4f32 { + 
msa_fmul_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Multiplication @@ -4772,8 +4822,8 @@ unsafe fn __msa_fmul_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmul.d))] -unsafe fn __msa_fmul_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fmul_d(a, b) +unsafe fn __msa_fmul_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fmul_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Round to Integer @@ -4785,7 +4835,7 @@ unsafe fn __msa_fmul_d(a: f64x2, b: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frint.w))] -unsafe fn __msa_frint_w(a: f32x4) -> f32x4 { +unsafe fn __msa_frint_w(a: v4f32) -> v4f32 { msa_frint_w(a) } @@ -4798,7 +4848,7 @@ unsafe fn __msa_frint_w(a: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frint.d))] -unsafe fn __msa_frint_d(a: f64x2) -> f64x2 { +unsafe fn __msa_frint_d(a: v2f64) -> v2f64 { msa_frint_d(a) } @@ -4810,7 +4860,7 @@ unsafe fn __msa_frint_d(a: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frcp.w))] -unsafe fn __msa_frcp_w(a: f32x4) -> f32x4 { +unsafe fn __msa_frcp_w(a: v4f32) -> v4f32 { msa_frcp_w(a) } @@ -4822,7 +4872,7 @@ unsafe fn __msa_frcp_w(a: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frcp.d))] -unsafe fn __msa_frcp_d(a: f64x2) -> f64x2 { +unsafe fn __msa_frcp_d(a: v2f64) -> v2f64 { msa_frcp_d(a) } @@ -4834,7 +4884,7 @@ unsafe fn __msa_frcp_d(a: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frsqrt.w))] -unsafe fn __msa_frsqrt_w(a: f32x4) -> f32x4 { +unsafe fn __msa_frsqrt_w(a: v4f32) -> v4f32 { msa_frsqrt_w(a) } @@ -4846,7 +4896,7 @@ unsafe fn __msa_frsqrt_w(a: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frsqrt.d))] -unsafe fn __msa_frsqrt_d(a: f64x2) -> f64x2 { +unsafe fn __msa_frsqrt_d(a: v2f64) -> v2f64 { msa_frsqrt_d(a) } @@ -4854,14 +4904,14 @@ unsafe fn __msa_frsqrt_d(a: f64x2) -> f64x2 { /// /// Set all bits to 0 in vector (four signed 32-bit integer numbers) elements. /// Signaling and quiet NaN elements in vector 'a' (four 32-bit floating point numbers) -/// or 'b' (four 32-bit floating point numbers) signal Invalid Operation exception. +/// or 'b' (four 32-bit floating point numbers) signal Invalid Operation exception. /// In case of a floating-point exception, the default result has all bits set to 0 /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsaf.w))] -unsafe fn __msa_fsaf_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsaf_w(a, b) +unsafe fn __msa_fsaf_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsaf_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Always False @@ -4874,47 +4924,47 @@ unsafe fn __msa_fsaf_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsaf.d))] -unsafe fn __msa_fsaf_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsaf_d(a, b) +unsafe fn __msa_fsaf_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsaf_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Equal /// -/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements /// if the corresponding 'a' (four 32-bit floating point numbers) -/// and 'b' (four 32-bit floating point numbers) elements are equal, otherwise set all bits to 0. 
+/// and 'b' (four 32-bit floating point numbers) elements are equal, otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fseq.w))] -unsafe fn __msa_fseq_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fseq_w(a, b) +unsafe fn __msa_fseq_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fseq_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Equal /// -/// Set all bits to 1 in vector (two signed 64-bit integer numbers) elements /// if the corresponding 'a' (two 64-bit floating point numbers) /// and 'b' (two 64-bit floating point numbers) elements are equal, otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fseq.d))] -unsafe fn __msa_fseq_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fseq_d(a, b) +unsafe fn __msa_fseq_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fseq_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Less or Equal /// -/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) elements -/// are less than or equal to 'b' (four 32-bit floating point numbers) elements, otherwise set all bits to 0. +/// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements +/// are less than or equal to 'b' (four 32-bit floating point numbers) elements, otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsle.w))] -unsafe fn __msa_fsle_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsle_w(a, b) +unsafe fn __msa_fsle_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsle_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Less or Equal @@ -4926,21 +4976,21 @@ unsafe fn __msa_fsle_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsle.d))] -unsafe fn __msa_fsle_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsle_d(a, b) +unsafe fn __msa_fsle_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsle_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Less Than /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) elements -/// are less than 'b' (four 32-bit floating point numbers) elements, otherwise set all bits to 0. +/// if the corresponding 'a' (four 32-bit floating point numbers) elements +/// are less than 'b' (four 32-bit floating point numbers) elements, otherwise set all bits to 0.
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fslt.w))] -unsafe fn __msa_fslt_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fslt_w(a, b) +unsafe fn __msa_fslt_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fslt_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Less Than @@ -4952,21 +5002,21 @@ unsafe fn __msa_fslt_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fslt.d))] -unsafe fn __msa_fslt_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fslt_d(a, b) +unsafe fn __msa_fslt_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fslt_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Not Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are not equal, otherwise set all bits to 0. +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are not equal, otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsne.w))] -unsafe fn __msa_fsne_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsne_w(a, b) +unsafe fn __msa_fsne_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsne_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Not Equal @@ -4978,22 +5028,22 @@ unsafe fn __msa_fsne_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsne.d))] -unsafe fn __msa_fsne_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsne_d(a, b) +unsafe fn __msa_fsne_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsne_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Ordered /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are ordered, +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are ordered, /// i.e. both elements are not NaN values, otherwise set all bits to 0. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsor.w))] -unsafe fn __msa_fsor_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsor_w(a, b) +unsafe fn __msa_fsor_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsor_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Ordered @@ -5006,20 +5056,20 @@ unsafe fn __msa_fsor_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsor.d))] -unsafe fn __msa_fsor_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsor_d(a, b) +unsafe fn __msa_fsor_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsor_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Square Root /// /// The square roots of floating-point elements in vector 'a' -/// (four 32-bit floating point numbers) are written to vector -/// (four 32-bit floating point numbers) elements are ordered, +/// (four 32-bit floating point numbers) are written to vector +/// (four 32-bit floating point numbers) elements. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsqrt.w))] -unsafe fn __msa_fsqrt_w(a: f32x4) -> f32x4 { +unsafe fn __msa_fsqrt_w(a: v4f32) -> v4f32 { msa_fsqrt_w(a) } @@ -5032,7 +5082,7 @@ unsafe fn __msa_fsqrt_w(a: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsqrt.d))] -unsafe fn __msa_fsqrt_d(a: f64x2) -> f64x2 { +unsafe fn __msa_fsqrt_d(a: v2f64) -> v2f64 { msa_fsqrt_d(a) } @@ -5046,8 +5096,8 @@ unsafe fn __msa_fsqrt_d(a: f64x2) -> f64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsub.w))] -unsafe fn __msa_fsub_w(a: f32x4, b: f32x4) -> f32x4 { - msa_fsub_w(a, b) +unsafe fn __msa_fsub_w(a: v4f32, b: v4f32) -> v4f32 { + msa_fsub_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Subtraction @@ -5060,22 +5110,22 @@ unsafe fn __msa_fsub_w(a: f32x4, b: f32x4) -> f32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsub.d))] -unsafe fn __msa_fsub_d(a: f64x2, b: f64x2) -> f64x2 { - msa_fsub_d(a, b) +unsafe fn __msa_fsub_d(a: v2f64, b: v2f64) -> v2f64 { + msa_fsub_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are unordered or equal, +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered or equal, /// otherwise set all bits to 0.
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsueq.w))] -unsafe fn __msa_fsueq_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsueq_w(a, b) +unsafe fn __msa_fsueq_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsueq_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Equal @@ -5088,22 +5138,22 @@ unsafe fn __msa_fsueq_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsueq.d))] -unsafe fn __msa_fsueq_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsueq_d(a, b) +unsafe fn __msa_fsueq_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsueq_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Less or Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) elements are -/// unordered or less than or equal to 'b' (four 32-bit floating point numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements are +/// unordered or less than or equal to 'b' (four 32-bit floating point numbers) elements, /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsule.w))] -unsafe fn __msa_fsule_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsule_w(a, b) +unsafe fn __msa_fsule_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsule_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Less or Equal @@ -5116,22 +5166,22 @@ unsafe fn __msa_fsule_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsule.d))] -unsafe fn __msa_fsule_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsule_d(a, b) +unsafe fn __msa_fsule_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsule_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Less Than /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) elements -/// are unordered or less than 'b' (four 32-bit floating point numbers) elements +/// if the corresponding 'a' (four 32-bit floating point numbers) elements +/// are unordered or less than 'b' (four 32-bit floating point numbers) elements, /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsult.w))] -unsafe fn __msa_fsult_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsult_w(a, b) +unsafe fn __msa_fsult_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsult_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Less Than @@ -5144,22 +5194,22 @@ unsafe fn __msa_fsult_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsult.d))] -unsafe fn __msa_fsult_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsult_d(a, b) +unsafe fn __msa_fsult_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsult_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are unordered, +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered, /// i.e. at least one element is a NaN value, otherwise set all bits to 0.
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsun.w))] -unsafe fn __msa_fsun_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsun_w(a, b) +unsafe fn __msa_fsun_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsun_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered @@ -5172,22 +5222,22 @@ unsafe fn __msa_fsun_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsun.d))] -unsafe fn __msa_fsun_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsun_d(a, b) +unsafe fn __msa_fsun_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsun_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Not Equal /// /// Set all bits to 1 in vector (four signed 32-bit integer numbers) elements -/// if the corresponding 'a' (four 32-bit floating point numbers) and -/// 'b' (four 32-bit floating point numbers) elements are unordered or not equal, +/// if the corresponding 'a' (four 32-bit floating point numbers) and +/// 'b' (four 32-bit floating point numbers) elements are unordered or not equal, /// otherwise set all bits to 0. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsune.w))] -unsafe fn __msa_fsune_w(a: f32x4, b: f32x4) -> i32x4 { - msa_fsune_w(a, b) +unsafe fn __msa_fsune_w(a: v4f32, b: v4f32) -> v4i32 { + msa_fsune_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Signaling Compare Unordered or Not Equal @@ -5200,8 +5250,8 @@ unsafe fn __msa_fsune_w(a: f32x4, b: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsune.d))] -unsafe fn __msa_fsune_d(a: f64x2, b: f64x2) -> i64x2 { - msa_fsune_d(a, b) +unsafe fn __msa_fsune_d(a: v2f64, b: v2f64) -> v2i64 { + msa_fsune_d(a, ::mem::transmute(b)) } /// Vector Floating-Point Convert to Signed Integer @@ -5214,7 +5264,7 @@ unsafe fn __msa_fsune_d(a: f64x2, b: f64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_s.w))] -unsafe fn __msa_ftint_s_w(a: f32x4) -> i32x4 { +unsafe fn __msa_ftint_s_w(a: v4f32) -> v4i32 { msa_ftint_s_w(a) } @@ -5228,7 +5278,7 @@ unsafe fn __msa_ftint_s_w(a: f32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_s.d))] -unsafe fn __msa_ftint_s_d(a: f64x2) -> i64x2 { +unsafe fn __msa_ftint_s_d(a: v2f64) -> v2i64 { msa_ftint_s_d(a) } @@ -5242,7 +5292,7 @@ unsafe fn __msa_ftint_s_d(a: f64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_u.w))] -unsafe fn __msa_ftint_u_w(a: f32x4) -> u32x4 { +unsafe fn __msa_ftint_u_w(a: v4f32) -> v4u32 { msa_ftint_u_w(a) } @@ -5256,14 +5306,14 @@ unsafe fn __msa_ftint_u_w(a: f32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_u.d))] -unsafe fn __msa_ftint_u_d(a: f64x2) -> u64x2 { +unsafe fn __msa_ftint_u_d(a: v2f64) -> v2u64 { msa_ftint_u_d(a) } /// Vector Floating-Point Convert to Fixed-Point /// /// The elements in vector 'a' (four 32-bit floating point numbers) -/// and 'b' (four 32-bit floating point numbers) are down-converted to a fixed-point +/// and 'b' (four 32-bit floating point numbers) are down-converted to a fixed-point /// representation, i.e. from 64-bit floating-point to 32-bit Q31 fixed-point /// representation, or from 32-bit floating-point to 16-bit Q15 fixed-point representation. /// The result is written to vector (eight signed 16-bit integer numbers).
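As a reading aid for the Q-format conversion described above, here is a minimal scalar sketch of the float-to-Q15 step that ftq.h applies to each lane. The helper name f32_to_q15 is hypothetical and not part of this patch; the hardware's rounding-mode and exception handling are omitted.

// Hypothetical scalar model of one ftq.h lane (illustration only):
// scale by 2^15 to reach Q15, round, then saturate to the i16 range.
fn f32_to_q15(x: f32) -> i16 {
    let scaled = (x * 32768.0).round(); // 2^15 = 32768
    scaled.clamp(i16::MIN as f32, i16::MAX as f32) as i16
}

For example, f32_to_q15(0.5) yields 16384, and any input at or above 1.0 saturates to 32767.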
@@ -5271,8 +5321,8 @@ unsafe fn __msa_ftint_u_d(a: f64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftq.h))] -unsafe fn __msa_ftq_h(a: f32x4, b: f32x4) -> i16x8 { - msa_ftq_h(a, b) +unsafe fn __msa_ftq_h(a: v4f32, b: v4f32) -> v8i16 { + msa_ftq_h(a, ::mem::transmute(b)) } /// Vector Floating-Point Convert to Fixed-Point @@ -5282,12 +5332,12 @@ unsafe fn __msa_ftq_h(a: f32x4, b: f32x4) -> i16x8 { /// representation, i.e. from 64-bit floating-point to 32-bit Q31 fixed-point /// representation, or from 32-bit floating-point to 16-bit Q15 fixed-point representation. /// The result is written to vector (four signed 32-bit integer numbers). -/// +/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftq.w))] -unsafe fn __msa_ftq_w(a: f64x2, b: f64x2) -> i32x4 { - msa_ftq_w(a, b) +unsafe fn __msa_ftq_w(a: v2f64, b: v2f64) -> v4i32 { + msa_ftq_w(a, ::mem::transmute(b)) } /// Vector Floating-Point Truncate and Convert to Signed Integer @@ -5295,11 +5345,11 @@ unsafe fn __msa_ftq_w(a: f64x2, b: f64x2) -> i32x4 { /// The elements in vector 'a' (four 32-bit floating point numbers) /// are truncated, i.e. rounded toward zero, to signed integer values. /// The result is written to vector (four signed 32-bit integer numbers). -/// +/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_s.w))] -unsafe fn __msa_ftrunc_s_w(a: f32x4) -> i32x4 { +unsafe fn __msa_ftrunc_s_w(a: v4f32) -> v4i32 { msa_ftrunc_s_w(a) } @@ -5308,11 +5358,11 @@ unsafe fn __msa_ftrunc_s_w(a: f32x4) -> i32x4 { /// The elements in vector 'a' (two 64-bit floating point numbers) /// are truncated, i.e. rounded toward zero, to signed integer values. /// The result is written to vector (two signed 64-bit integer numbers). 
-/// +/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_s.d))] -unsafe fn __msa_ftrunc_s_d(a: f64x2) -> i64x2 { +unsafe fn __msa_ftrunc_s_d(a: v2f64) -> v2i64 { msa_ftrunc_s_d(a) } @@ -5325,7 +5375,7 @@ unsafe fn __msa_ftrunc_s_d(a: f64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_u.w))] -unsafe fn __msa_ftrunc_u_w(a: f32x4) -> u32x4 { +unsafe fn __msa_ftrunc_u_w(a: v4f32) -> v4u32 { msa_ftrunc_u_w(a) } @@ -5338,7 +5388,7 @@ unsafe fn __msa_ftrunc_u_w(a: f32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_u.d))] -unsafe fn __msa_ftrunc_u_d(a: f64x2) -> u64x2 { +unsafe fn __msa_ftrunc_u_d(a: v2f64) -> v2u64 { msa_ftrunc_u_d(a) } @@ -5352,8 +5402,8 @@ unsafe fn __msa_ftrunc_u_d(a: f64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_s.h))] -unsafe fn __msa_hadd_s_h(a: i8x16, b: i8x16) -> i16x8 { - msa_hadd_s_h(a, b) +unsafe fn __msa_hadd_s_h(a: v16i8, b: v16i8) -> v8i16 { + msa_hadd_s_h(a, ::mem::transmute(b)) } /// Vector Signed Horizontal Add @@ -5366,8 +5416,8 @@ unsafe fn __msa_hadd_s_h(a: i8x16, b: i8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_s.w))] -unsafe fn __msa_hadd_s_w(a: i16x8, b: i16x8) -> i32x4 { - msa_hadd_s_w(a, b) +unsafe fn __msa_hadd_s_w(a: v8i16, b: v8i16) -> v4i32 { + msa_hadd_s_w(a, ::mem::transmute(b)) } /// Vector Signed Horizontal Add @@ -5380,8 +5430,8 @@ unsafe fn __msa_hadd_s_w(a: i16x8, b: i16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_s.d))] -unsafe fn __msa_hadd_s_d(a: i32x4, b: i32x4) -> i64x2 { - msa_hadd_s_d(a, b) +unsafe fn __msa_hadd_s_d(a: v4i32, b: v4i32) -> v2i64 { + msa_hadd_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Horizontal Add @@ -5394,8 +5444,8 @@ unsafe fn __msa_hadd_s_d(a: i32x4, b: i32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_u.h))] -unsafe fn __msa_hadd_u_h(a: u8x16, b: u8x16) -> u16x8 { - msa_hadd_u_h(a, b) +unsafe fn __msa_hadd_u_h(a: v16u8, b: v16u8) -> v8u16 { + msa_hadd_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Horizontal Add @@ -5408,8 +5458,8 @@ unsafe fn __msa_hadd_u_h(a: u8x16, b: u8x16) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_u.w))] -unsafe fn __msa_hadd_u_w(a: u16x8, b: u16x8) -> u32x4 { - msa_hadd_u_w(a, b) +unsafe fn __msa_hadd_u_w(a: v8u16, b: v8u16) -> v4u32 { + msa_hadd_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Horizontal Add @@ -5422,8 +5472,8 @@ unsafe fn __msa_hadd_u_w(a: u16x8, b: u16x8) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_u.d))] -unsafe fn __msa_hadd_u_d(a: u32x4, b: u32x4) -> u64x2 { - msa_hadd_u_d(a, b) +unsafe fn __msa_hadd_u_d(a: v4u32, b: v4u32) -> v2u64 { + msa_hadd_u_d(a, ::mem::transmute(b)) } /// Vector Signed Horizontal Subtract @@ -5436,8 +5486,8 @@ unsafe fn __msa_hadd_u_d(a: u32x4, b: u32x4) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_s.h))] -unsafe fn __msa_hsub_s_h(a: i8x16, b: i8x16) -> i16x8 { - msa_hsub_s_h(a, b) +unsafe fn __msa_hsub_s_h(a: v16i8, b: v16i8) -> v8i16 { + msa_hsub_s_h(a, ::mem::transmute(b)) } /// Vector Signed Horizontal Subtract @@ -5450,8 +5500,8 @@ unsafe fn __msa_hsub_s_h(a: i8x16, b: i8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_s.w))] 
-unsafe fn __msa_hsub_s_w(a: i16x8, b: i16x8) -> i32x4 { - msa_hsub_s_w(a, b) +unsafe fn __msa_hsub_s_w(a: v8i16, b: v8i16) -> v4i32 { + msa_hsub_s_w(a, ::mem::transmute(b)) } /// Vector Signed Horizontal Subtract @@ -5464,8 +5514,8 @@ unsafe fn __msa_hsub_s_w(a: i16x8, b: i16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_s.d))] -unsafe fn __msa_hsub_s_d(a: i32x4, b: i32x4) -> i64x2 { - msa_hsub_s_d(a, b) +unsafe fn __msa_hsub_s_d(a: v4i32, b: v4i32) -> v2i64 { + msa_hsub_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Horizontal Subtract @@ -5478,8 +5528,8 @@ unsafe fn __msa_hsub_s_d(a: i32x4, b: i32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_u.h))] -unsafe fn __msa_hsub_u_h(a: u8x16, b: u8x16) -> i16x8 { - msa_hsub_u_h(a, b) +unsafe fn __msa_hsub_u_h(a: v16u8, b: v16u8) -> v8i16 { + msa_hsub_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Horizontal Subtract @@ -5492,8 +5542,8 @@ unsafe fn __msa_hsub_u_h(a: u8x16, b: u8x16) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_u.w))] -unsafe fn __msa_hsub_u_w(a: u16x8, b: u16x8) -> i32x4 { - msa_hsub_u_w(a, b) +unsafe fn __msa_hsub_u_w(a: v8u16, b: v8u16) -> v4i32 { + msa_hsub_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Horizontal Subtract @@ -5506,8 +5556,8 @@ unsafe fn __msa_hsub_u_w(a: u16x8, b: u16x8) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_u.d))] -unsafe fn __msa_hsub_u_d(a: u32x4, b: u32x4) -> i64x2 { - msa_hsub_u_d(a, b) +unsafe fn __msa_hsub_u_d(a: v4u32, b: v4u32) -> v2i64 { + msa_hsub_u_d(a, ::mem::transmute(b)) } /// Vector Interleave Even @@ -5520,8 +5570,8 @@ unsafe fn __msa_hsub_u_d(a: u32x4, b: u32x4) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.b))] -unsafe fn __msa_ilvev_b(a: i8x16, b: i8x16) -> i8x16 { - msa_ilvev_b(a, b) +unsafe fn __msa_ilvev_b(a: v16i8, b: v16i8) -> v16i8 { + msa_ilvev_b(a, ::mem::transmute(b)) } /// Vector Interleave Even @@ -5534,8 +5584,8 @@ unsafe fn __msa_ilvev_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.h))] -unsafe fn __msa_ilvev_h(a: i16x8, b: i16x8) -> i16x8 { - msa_ilvev_h(a, b) +unsafe fn __msa_ilvev_h(a: v8i16, b: v8i16) -> v8i16 { + msa_ilvev_h(a, ::mem::transmute(b)) } /// Vector Interleave Even @@ -5548,8 +5598,8 @@ unsafe fn __msa_ilvev_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.w))] -unsafe fn __msa_ilvev_w(a: i32x4, b: i32x4) -> i32x4 { - msa_ilvev_w(a, b) +unsafe fn __msa_ilvev_w(a: v4i32, b: v4i32) -> v4i32 { + msa_ilvev_w(a, ::mem::transmute(b)) } /// Vector Interleave Even @@ -5562,8 +5612,8 @@ unsafe fn __msa_ilvev_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.d))] -unsafe fn __msa_ilvev_d(a: i64x2, b: i64x2) -> i64x2 { - msa_ilvev_d(a, b) +unsafe fn __msa_ilvev_d(a: v2i64, b: v2i64) -> v2i64 { + msa_ilvev_d(a, ::mem::transmute(b)) } /// Vector Interleave Left @@ -5576,8 +5626,8 @@ unsafe fn __msa_ilvev_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.b))] -unsafe fn __msa_ilvl_b(a: i8x16, b: i8x16) -> i8x16 { - msa_ilvl_b(a, b) +unsafe fn __msa_ilvl_b(a: v16i8, b: v16i8) -> v16i8 { + msa_ilvl_b(a, ::mem::transmute(b)) } /// Vector Interleave Left @@ -5590,8 +5640,8 @@ unsafe fn 
__msa_ilvl_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.h))] -unsafe fn __msa_ilvl_h(a: i16x8, b: i16x8) -> i16x8 { - msa_ilvl_h(a, b) +unsafe fn __msa_ilvl_h(a: v8i16, b: v8i16) -> v8i16 { + msa_ilvl_h(a, ::mem::transmute(b)) } /// Vector Interleave Left @@ -5604,8 +5654,8 @@ unsafe fn __msa_ilvl_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.w))] -unsafe fn __msa_ilvl_w(a: i32x4, b: i32x4) -> i32x4 { - msa_ilvl_w(a, b) +unsafe fn __msa_ilvl_w(a: v4i32, b: v4i32) -> v4i32 { + msa_ilvl_w(a, ::mem::transmute(b)) } /// Vector Interleave Left @@ -5618,8 +5668,8 @@ unsafe fn __msa_ilvl_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.d))] -unsafe fn __msa_ilvl_d(a: i64x2, b: i64x2) -> i64x2 { - msa_ilvl_d(a, b) +unsafe fn __msa_ilvl_d(a: v2i64, b: v2i64) -> v2i64 { + msa_ilvl_d(a, ::mem::transmute(b)) } /// Vector Interleave Odd @@ -5632,8 +5682,8 @@ unsafe fn __msa_ilvl_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.b))] -unsafe fn __msa_ilvod_b(a: i8x16, b: i8x16) -> i8x16 { - msa_ilvod_b(a, b) +unsafe fn __msa_ilvod_b(a: v16i8, b: v16i8) -> v16i8 { + msa_ilvod_b(a, ::mem::transmute(b)) } /// Vector Interleave Odd @@ -5646,8 +5696,8 @@ unsafe fn __msa_ilvod_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.h))] -unsafe fn __msa_ilvod_h(a: i16x8, b: i16x8) -> i16x8 { - msa_ilvod_h(a, b) +unsafe fn __msa_ilvod_h(a: v8i16, b: v8i16) -> v8i16 { + msa_ilvod_h(a, ::mem::transmute(b)) } /// Vector Interleave Odd @@ -5660,8 +5710,8 @@ unsafe fn __msa_ilvod_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.w))] -unsafe fn __msa_ilvod_w(a: i32x4, b: i32x4) -> i32x4 { - msa_ilvod_w(a, b) +unsafe fn __msa_ilvod_w(a: v4i32, b: v4i32) -> v4i32 { + msa_ilvod_w(a, ::mem::transmute(b)) } /// Vector Interleave Odd @@ -5674,8 +5724,8 @@ unsafe fn __msa_ilvod_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.d))] -unsafe fn __msa_ilvod_d(a: i64x2, b: i64x2) -> i64x2 { - msa_ilvod_d(a, b) +unsafe fn __msa_ilvod_d(a: v2i64, b: v2i64) -> v2i64 { + msa_ilvod_d(a, ::mem::transmute(b)) } /// Vector Interleave Right @@ -5688,8 +5738,8 @@ unsafe fn __msa_ilvod_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.b))] -unsafe fn __msa_ilvr_b(a: i8x16, b: i8x16) -> i8x16 { - msa_ilvr_b(a, b) +unsafe fn __msa_ilvr_b(a: v16i8, b: v16i8) -> v16i8 { + msa_ilvr_b(a, ::mem::transmute(b)) } /// Vector Interleave Right @@ -5702,8 +5752,8 @@ unsafe fn __msa_ilvr_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.h))] -unsafe fn __msa_ilvr_h(a: i16x8, b: i16x8) -> i16x8 { - msa_ilvr_h(a, b) +unsafe fn __msa_ilvr_h(a: v8i16, b: v8i16) -> v8i16 { + msa_ilvr_h(a, ::mem::transmute(b)) } /// Vector Interleave Right @@ -5716,8 +5766,8 @@ unsafe fn __msa_ilvr_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.w))] -unsafe fn __msa_ilvr_w(a: i32x4, b: i32x4) -> i32x4 { - msa_ilvr_w(a, b) +unsafe fn __msa_ilvr_w(a: v4i32, b: v4i32) -> v4i32 { + msa_ilvr_w(a, ::mem::transmute(b)) } /// Vector Interleave Right @@ -5730,8 
+5780,8 @@ unsafe fn __msa_ilvr_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.d))] -unsafe fn __msa_ilvr_d(a: i64x2, b: i64x2) -> i64x2 { - msa_ilvr_d(a, b) +unsafe fn __msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64 { + msa_ilvr_d(a, ::mem::transmute(b)) } /// GPR Insert Element @@ -5744,7 +5794,7 @@ unsafe fn __msa_ilvr_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_insert_b(a: i8x16, imm4: i32, c: i32) -> i8x16 { +unsafe fn __msa_insert_b(a: v16i8, imm4: i32, c: i32) -> v16i8 { macro_rules! call { ($imm4:expr) => { msa_insert_b(a, $imm4, c) @@ -5763,7 +5813,7 @@ unsafe fn __msa_insert_b(a: i8x16, imm4: i32, c: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_insert_h(a: i16x8, imm3: i32, c: i32) -> i16x8 { +unsafe fn __msa_insert_h(a: v8i16, imm3: i32, c: i32) -> v8i16 { macro_rules! call { ($imm3:expr) => { msa_insert_h(a, $imm3, c) @@ -5782,7 +5832,7 @@ unsafe fn __msa_insert_h(a: i16x8, imm3: i32, c: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_insert_w(a: i32x4, imm2: i32, c: i32) -> i32x4 { +unsafe fn __msa_insert_w(a: v4i32, imm2: i32, c: i32) -> v4i32 { macro_rules! call { ($imm2:expr) => { msa_insert_w(a, $imm2, c) @@ -5801,7 +5851,7 @@ unsafe fn __msa_insert_w(a: i32x4, imm2: i32, c: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_insert_d(a: i64x2, imm1: i32, c: i64) -> i64x2 { +unsafe fn __msa_insert_d(a: v2i64, imm1: i32, c: i64) -> v2i64 { macro_rules! call { ($imm1:expr) => { msa_insert_d(a, $imm1, c) @@ -5820,7 +5870,7 @@ unsafe fn __msa_insert_d(a: i64x2, imm1: i32, c: i64) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_insve_b(a: i8x16, imm4: i32, c: i8x16) -> i8x16 { +unsafe fn __msa_insve_b(a: v16i8, imm4: i32, c: v16i8) -> v16i8 { macro_rules! call { ($imm4:expr) => { msa_insve_b(a, $imm4, c) @@ -5839,7 +5889,7 @@ unsafe fn __msa_insve_b(a: i8x16, imm4: i32, c: i8x16) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_insve_h(a: i16x8, imm3: i32, c: i16x8) -> i16x8 { +unsafe fn __msa_insve_h(a: v8i16, imm3: i32, c: v8i16) -> v8i16 { macro_rules! call { ($imm3:expr) => { msa_insve_h(a, $imm3, c) @@ -5858,7 +5908,7 @@ unsafe fn __msa_insve_h(a: i16x8, imm3: i32, c: i16x8) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_insve_w(a: i32x4, imm2: i32, c: i32x4) -> i32x4 { +unsafe fn __msa_insve_w(a: v4i32, imm2: i32, c: v4i32) -> v4i32 { macro_rules! call { ($imm2:expr) => { msa_insve_w(a, $imm2, c) @@ -5877,7 +5927,7 @@ unsafe fn __msa_insve_w(a: i32x4, imm2: i32, c: i32x4) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_insve_d(a: i64x2, imm1: i32, c: i64x2) -> i64x2 { +unsafe fn __msa_insve_d(a: v2i64, imm1: i32, c: v2i64) -> v2i64 { macro_rules! 
call { ($imm1:expr) => { msa_insve_d(a, $imm1, c) @@ -5888,15 +5938,15 @@ unsafe fn __msa_insve_d(a: i64x2, imm1: i32, c: i64x2) -> i64x2 { /// Vector Load /// -/// The WRLEN / 8 bytes at the ef fective memory location addressed by the base -/// mem_addr and the 10-bit signed immediate offset imm_s10 are fetched and placed in +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 10-bit signed immediate offset imm_s10 are fetched and placed in /// the vector (sixteen signed 8-bit integer numbers) value. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.b, imm_s10 = 0b1111111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ld_b(mem_addr: *mut i8, imm_s10: i32) -> i8x16 { +unsafe fn __msa_ld_b(mem_addr: *mut i8, imm_s10: i32) -> v16i8 { macro_rules! call { ($imm_s10:expr) => { msa_ld_b(mem_addr, $imm_s10) @@ -5907,15 +5957,15 @@ unsafe fn __msa_ld_b(mem_addr: *mut i8, imm_s10: i32) -> i8x16 { /// Vector Load /// -/// The WRLEN / 8 bytes at the ef fective memory location addressed by the base -/// mem_addr and the 10-bit signed immediate offset imm_s11 are fetched and placed in +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 11-bit signed immediate offset imm_s11 are fetched and placed in /// the vector (eight signed 16-bit integer numbers) value. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.h, imm_s11 = 0b11111111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ld_h(mem_addr: *mut i8, imm_s11: i32) -> i16x8 { +unsafe fn __msa_ld_h(mem_addr: *mut i8, imm_s11: i32) -> v8i16 { macro_rules! call { ($imm_s11:expr) => { msa_ld_h(mem_addr, $imm_s11) @@ -5926,15 +5976,15 @@ unsafe fn __msa_ld_h(mem_addr: *mut i8, imm_s11: i32) -> i16x8 { /// Vector Load /// -/// The WRLEN / 8 bytes at the ef fective memory location addressed by the base -/// mem_addr and the 10-bit signed immediate offset imm_s12 are fetched and placed in +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 12-bit signed immediate offset imm_s12 are fetched and placed in /// the vector (four signed 32-bit integer numbers) value. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.w, imm_s12 = 0b111111111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ld_w(mem_addr: *mut i8, imm_s12: i32) -> i32x4 { +unsafe fn __msa_ld_w(mem_addr: *mut i8, imm_s12: i32) -> v4i32 { macro_rules! call { ($imm_s12:expr) => { msa_ld_w(mem_addr, $imm_s12) @@ -5945,15 +5995,15 @@ unsafe fn __msa_ld_w(mem_addr: *mut i8, imm_s12: i32) -> i32x4 { /// Vector Load /// -/// The WRLEN / 8 bytes at the ef fective memory location addressed by the base -/// mem_addr and the 10-bit signed immediate offset imm_s13 are fetched and placed in +/// The WRLEN / 8 bytes at the effective memory location addressed by the base +/// mem_addr and the 13-bit signed immediate offset imm_s13 are fetched and placed in /// the vector (two signed 64-bit integer numbers) value. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.d, imm_s13 = 0b1111111111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ld_d(mem_addr: *mut i8, imm_s13: i32) -> i64x2 { +unsafe fn __msa_ld_d(mem_addr: *mut i8, imm_s13: i32) -> v2i64 { macro_rules!
call { ($imm_s13:expr) => { msa_ld_d(mem_addr, $imm_s13) @@ -5964,7 +6014,7 @@ unsafe fn __msa_ld_d(mem_addr: *mut i8, imm_s13: i32) -> i64x2 { /// Immediate Load /// -/// The signed immediate imm_s10 is replicated in all vector +/// The signed immediate imm_s10 is replicated in all vector /// (sixteen signed 8-bit integer numbers) elements. For byte elements, /// only the least significant 8 bits of imm_s10 will be used. /// @@ -5972,7 +6022,7 @@ unsafe fn __msa_ld_d(mem_addr: *mut i8, imm_s13: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))] #[rustc_args_required_const(0)] -unsafe fn __msa_ldi_b(imm_s10: i32) -> i8x16 { +unsafe fn __msa_ldi_b(imm_s10: i32) -> v16i8 { macro_rules! call { ($imm_s10:expr) => { msa_ldi_b($imm_s10) @@ -5983,7 +6033,7 @@ unsafe fn __msa_ldi_b(imm_s10: i32) -> i8x16 { /// Immediate Load /// -/// The signed immediate imm_s10 is replicated in all vector +/// The signed immediate imm_s10 is replicated in all vector /// (eight signed 16-bit integer numbers) elements. For byte elements, /// only the least significant 8 bits of imm_s10 will be used. /// @@ -5991,7 +6041,7 @@ unsafe fn __msa_ldi_b(imm_s10: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))] #[rustc_args_required_const(0)] -unsafe fn __msa_ldi_h(imm_s10: i32) -> i16x8 { +unsafe fn __msa_ldi_h(imm_s10: i32) -> v8i16 { macro_rules! call { ($imm_s10:expr) => { msa_ldi_h($imm_s10) @@ -6002,7 +6052,7 @@ unsafe fn __msa_ldi_h(imm_s10: i32) -> i16x8 { /// Immediate Load /// -/// The signed immediate imm_s10 is replicated in all vector +/// The signed immediate imm_s10 is replicated in all vector /// (four signed 32-bit integer numbers) elements. For byte elements, /// only the least significant 8 bits of imm_s10 will be used. /// @@ -6010,7 +6060,7 @@ unsafe fn __msa_ldi_h(imm_s10: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))] #[rustc_args_required_const(0)] -unsafe fn __msa_ldi_w(imm_s10: i32) -> i32x4 { +unsafe fn __msa_ldi_w(imm_s10: i32) -> v4i32 { macro_rules! call { ($imm_s10:expr) => { msa_ldi_w($imm_s10) @@ -6021,7 +6071,7 @@ unsafe fn __msa_ldi_w(imm_s10: i32) -> i32x4 { /// Immediate Load /// -/// The signed immediate imm_s10 is replicated in all vector +/// The signed immediate imm_s10 is replicated in all vector /// (two signed 64-bit integer numbers) elements. For byte elements, /// only the least significant 8 bits of imm_s10 will be used. /// @@ -6029,7 +6079,7 @@ unsafe fn __msa_ldi_w(imm_s10: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))] #[rustc_args_required_const(0)] -unsafe fn __msa_ldi_d(imm_s10: i32) -> i64x2 { +unsafe fn __msa_ldi_d(imm_s10: i32) -> v2i64 { macro_rules! call { ($imm_s10:expr) => { msa_ldi_d($imm_s10) @@ -6040,7 +6090,7 @@ unsafe fn __msa_ldi_d(imm_s10: i32) -> i64x2 { /// Vector Fixed-Point Multiply and Add /// -/// The products of fixed-point elements in 'b' (eight signed 16-bit integer numbers) +/// The products of fixed-point elements in 'b' (eight signed 16-bit integer numbers) /// by fixed-point elements in vector 'c' (eight signed 16-bit integer numbers) /// are added to the fixed-point elements in vector 'a' (eight signed 16-bit integer numbers). /// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination.
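Since the Q15 wording above is easy to misread, here is a minimal scalar sketch of the per-lane arithmetic under the usual Q15 conventions. The helper madd_q_lane is hypothetical; the placement of the shift and the saturation follows the generic Q15 idiom, not a verified transcription of the hardware.

// Hypothetical scalar model of one madd_q.h lane, assuming standard
// Q15 arithmetic: multiply, shift back down to Q15, accumulate, and
// saturate the sum to the i16 range.
fn madd_q_lane(a: i16, b: i16, c: i16) -> i16 {
    let product = (b as i32 * c as i32) >> 15; // Q15 * Q15 -> Q15
    (a as i32 + product).clamp(i16::MIN as i32, i16::MAX as i32) as i16
}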
@@ -6049,13 +6099,13 @@ unsafe fn __msa_ldi_d(imm_s10: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(madd_q.h))] -unsafe fn __msa_madd_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { - msa_madd_q_h(a, b, c) +unsafe fn __msa_madd_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + msa_madd_q_h(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply and Add /// -/// The products of fixed-point elements in 'b' (four signed 32-bit integer numbers) +/// The products of fixed-point elements in 'b' (four signed 32-bit integer numbers) /// by fixed-point elements in vector 'c' (four signed 32-bit integer numbers) /// are added to the fixed-point elements in vector 'a' (four signed 32-bit integer numbers). /// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. @@ -6064,13 +6114,13 @@ unsafe fn __msa_madd_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(madd_q.w))] -unsafe fn __msa_madd_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { - msa_madd_q_w(a, b, c) +unsafe fn __msa_madd_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + msa_madd_q_w(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply and Add Rounded /// -/// The products of fixed-point elements in 'b' (eight signed 16-bit integer numbers) +/// The products of fixed-point elements in 'b' (eight signed 16-bit integer numbers) /// by fixed-point elements in vector 'c' (eight signed 16-bit integer numbers) /// are added to the fixed-point elements in vector 'a' (eight signed 16-bit integer numbers). /// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. @@ -6079,13 +6129,13 @@ unsafe fn __msa_madd_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddr_q.h))] -unsafe fn __msa_maddr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { - msa_maddr_q_h(a, b, c) +unsafe fn __msa_maddr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + msa_maddr_q_h(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply and Add Rounded /// -/// The products of fixed-point elements in 'b' (four signed 32-bit integer numbers) +/// The products of fixed-point elements in 'b' (four signed 32-bit integer numbers) /// by fixed-point elements in vector 'c' (four signed 32-bit integer numbers) /// are added to the fixed-point elements in vector 'a' (four signed 32-bit integer numbers). /// The multiplication result is not saturated, i.e. exact (-1) * (-1) = 1 is added to the destination. @@ -6094,13 +6144,13 @@ unsafe fn __msa_maddr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddr_q.w))] -unsafe fn __msa_maddr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { - msa_maddr_q_w(a, b, c) +unsafe fn __msa_maddr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + msa_maddr_q_w(a, ::mem::transmute(b), c) } /// Vector Multiply and Add /// -/// The integer elements in vector 'b' (sixteen signed 8-bit integer numbers) +/// The integer elements in vector 'b' (sixteen signed 8-bit integer numbers) /// are multiplied by integer elements in vector 'c' (sixteen signed 8-bit integer numbers) /// and added to the integer elements in vector 'a' (sixteen signed 8-bit integer numbers). /// The most significant half of the multiplication result is discarded.
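Per the descriptions above, the only difference between the madd_q and maddr_q families is rounding. A sketch of that single change, in the same hypothetical lane-helper style as before:

// Hypothetical scalar model of one maddr_q.h lane: identical to the
// madd_q sketch except that 1 << 14 is added before the 15-bit shift,
// so the product rounds to nearest rather than truncating.
fn maddr_q_lane(a: i16, b: i16, c: i16) -> i16 {
    let product = (b as i32 * c as i32 + (1 << 14)) >> 15;
    (a as i32 + product).clamp(i16::MIN as i32, i16::MAX as i32) as i16
}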
@@ -6108,13 +6158,13 @@ unsafe fn __msa_maddr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.b))] -unsafe fn __msa_maddv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 { - msa_maddv_b(a, b, c) +unsafe fn __msa_maddv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + msa_maddv_b(a, ::mem::transmute(b), c) } /// Vector Multiply and Add /// -/// The integer elements in vector 'b' (eight signed 16-bit integer numbers) +/// The integer elements in vector 'b' (eight signed 16-bit integer numbers) /// are multiplied by integer elements in vector 'c' (eight signed 16-bit integer numbers) /// and added to the integer elements in vector 'a' (eight signed 16-bit integer numbers). /// The most significant half of the multiplication result is discarded. @@ -6122,13 +6172,13 @@ unsafe fn __msa_maddv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.h))] -unsafe fn __msa_maddv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { - msa_maddv_h(a, b, c) +unsafe fn __msa_maddv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + msa_maddv_h(a, ::mem::transmute(b), c) } /// Vector Multiply and Add /// -/// The integer elements in vector 'b' (four signed 32-bit integer numbers) +/// The integer elements in vector 'b' (four signed 32-bit integer numbers) /// are multiplied by integer elements in vector 'c' (four signed 32-bit integer numbers) /// and added to the integer elements in vector 'a' (four signed 32-bit integer numbers). /// The most significant half of the multiplication result is discarded. @@ -6136,13 +6186,13 @@ unsafe fn __msa_maddv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.w))] -unsafe fn __msa_maddv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { - msa_maddv_w(a, b, c) +unsafe fn __msa_maddv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + msa_maddv_w(a, ::mem::transmute(b), c) } /// Vector Multiply and Add /// -/// The integer elements in vector 'b' (two signed 64-bit integer numbers) +/// The integer elements in vector 'b' (two signed 64-bit integer numbers) /// are multiplied by integer elements in vector 'c' (two signed 64-bit integer numbers) /// and added to the integer elements in vector 'a' (two signed 64-bit integer numbers). /// The most significant half of the multiplication result is discarded.
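In contrast to the saturating fixed-point forms above, the maddv family is plain modular integer arithmetic: the most significant half of the product is discarded and overflow wraps. A minimal sketch of one byte lane (the helper maddv_lane is hypothetical):

// Hypothetical scalar model of one maddv.b lane: both the multiply and
// the accumulate wrap on overflow, discarding the high half of the
// product as the documentation above describes.
fn maddv_lane(a: i8, b: i8, c: i8) -> i8 {
    a.wrapping_add(b.wrapping_mul(c))
}

For example, maddv_lane(1, 16, 16) returns 1, because 16 * 16 wraps to 0 in 8-bit arithmetic.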
@@ -6150,8 +6200,8 @@ unsafe fn __msa_maddv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.d))] -unsafe fn __msa_maddv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 { - msa_maddv_d(a, b, c) +unsafe fn __msa_maddv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + msa_maddv_d(a, ::mem::transmute(b), c) } /// Vector Maximum Based on Absolute Values @@ -6164,8 +6214,8 @@ unsafe fn __msa_maddv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.b))] -unsafe fn __msa_max_a_b(a: i8x16, b: i8x16) -> i8x16 { - msa_max_a_b(a, b) +unsafe fn __msa_max_a_b(a: v16i8, b: v16i8) -> v16i8 { + msa_max_a_b(a, ::mem::transmute(b)) } /// Vector Maximum Based on Absolute Values @@ -6178,8 +6228,8 @@ unsafe fn __msa_max_a_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.h))] -unsafe fn __msa_max_a_h(a: i16x8, b: i16x8) -> i16x8 { - msa_max_a_h(a, b) +unsafe fn __msa_max_a_h(a: v8i16, b: v8i16) -> v8i16 { + msa_max_a_h(a, ::mem::transmute(b)) } /// Vector Maximum Based on Absolute Values @@ -6192,8 +6242,8 @@ unsafe fn __msa_max_a_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.w))] -unsafe fn __msa_max_a_w(a: i32x4, b: i32x4) -> i32x4 { - msa_max_a_w(a, b) +unsafe fn __msa_max_a_w(a: v4i32, b: v4i32) -> v4i32 { + msa_max_a_w(a, ::mem::transmute(b)) } /// Vector Maximum Based on Absolute Values @@ -6206,8 +6256,8 @@ unsafe fn __msa_max_a_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.d))] -unsafe fn __msa_max_a_d(a: i64x2, b: i64x2) -> i64x2 { - msa_max_a_d(a, b) +unsafe fn __msa_max_a_d(a: v2i64, b: v2i64) -> v2i64 { + msa_max_a_d(a, ::mem::transmute(b)) } /// Vector Signed Maximum @@ -6219,8 +6269,8 @@ unsafe fn __msa_max_a_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.b))] -unsafe fn __msa_max_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_max_s_b(a, b) +unsafe fn __msa_max_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_max_s_b(a, ::mem::transmute(b)) } /// Vector Signed Maximum @@ -6232,8 +6282,8 @@ unsafe fn __msa_max_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.h))] -unsafe fn __msa_max_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_max_s_h(a, b) +unsafe fn __msa_max_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_max_s_h(a, ::mem::transmute(b)) } /// Vector Signed Maximum @@ -6245,8 +6295,8 @@ unsafe fn __msa_max_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.w))] -unsafe fn __msa_max_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_max_s_w(a, b) +unsafe fn __msa_max_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_max_s_w(a, ::mem::transmute(b)) } /// Vector Signed Maximum @@ -6258,8 +6308,8 @@ unsafe fn __msa_max_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.d))] -unsafe fn __msa_max_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_max_s_d(a, b) +unsafe fn __msa_max_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_max_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Maximum @@ -6271,8 +6321,8 @@ unsafe fn __msa_max_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.b))] -unsafe fn __msa_max_u_b(a: 
u8x16, b: u8x16) -> u8x16 { - msa_max_u_b(a, b) +unsafe fn __msa_max_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_max_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Maximum @@ -6284,8 +6334,8 @@ unsafe fn __msa_max_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.h))] -unsafe fn __msa_max_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_max_u_h(a, b) +unsafe fn __msa_max_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_max_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Maximum @@ -6297,8 +6347,8 @@ unsafe fn __msa_max_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.w))] -unsafe fn __msa_max_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_max_u_w(a, b) +unsafe fn __msa_max_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_max_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Maximum @@ -6310,8 +6360,8 @@ unsafe fn __msa_max_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.d))] -unsafe fn __msa_max_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_max_u_d(a, b) +unsafe fn __msa_max_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_max_u_d(a, ::mem::transmute(b)) } /// Immediate Signed Maximum @@ -6324,7 +6374,7 @@ unsafe fn __msa_max_u_d(a: u64x2, b: u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.b, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_s_b(a: i8x16, imm_s5: i32) -> i8x16 { +unsafe fn __msa_maxi_s_b(a: v16i8, imm_s5: i32) -> v16i8 { macro_rules! call { ($imm_s5:expr) => { msa_maxi_s_b(a, $imm_s5) @@ -6343,7 +6393,7 @@ unsafe fn __msa_maxi_s_b(a: i8x16, imm_s5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.h, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_s_h(a: i16x8, imm_s5: i32) -> i16x8 { +unsafe fn __msa_maxi_s_h(a: v8i16, imm_s5: i32) -> v8i16 { macro_rules! call { ($imm_s5:expr) => { msa_maxi_s_h(a, $imm_s5) @@ -6362,7 +6412,7 @@ unsafe fn __msa_maxi_s_h(a: i16x8, imm_s5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.w, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_s_w(a: i32x4, imm_s5: i32) -> i32x4 { +unsafe fn __msa_maxi_s_w(a: v4i32, imm_s5: i32) -> v4i32 { macro_rules! call { ($imm_s5:expr) => { msa_maxi_s_w(a, $imm_s5) @@ -6381,7 +6431,7 @@ unsafe fn __msa_maxi_s_w(a: i32x4, imm_s5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.d, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_s_d(a: i64x2, imm_s5: i32) -> i64x2 { +unsafe fn __msa_maxi_s_d(a: v2i64, imm_s5: i32) -> v2i64 { macro_rules! call { ($imm_s5:expr) => { msa_maxi_s_d(a, $imm_s5) @@ -6400,7 +6450,7 @@ unsafe fn __msa_maxi_s_d(a: i64x2, imm_s5: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.b, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_u_b(a: u8x16, imm5: i32) -> u8x16 { +unsafe fn __msa_maxi_u_b(a: v16u8, imm5: i32) -> v16u8 { macro_rules! call { ($imm5:expr) => { msa_maxi_u_b(a, $imm5) @@ -6419,7 +6469,7 @@ unsafe fn __msa_maxi_u_b(a: u8x16, imm5: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.h, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_u_h(a: u16x8, imm5: i32) -> u16x8 { +unsafe fn __msa_maxi_u_h(a: v8u16, imm5: i32) -> v8u16 { macro_rules!
call { ($imm5:expr) => { msa_maxi_u_h(a, $imm5) @@ -6438,7 +6488,7 @@ unsafe fn __msa_maxi_u_h(a: u16x8, imm5: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_u_w(a: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_maxi_u_w(a: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { msa_maxi_u_w(a, $imm5) @@ -6457,7 +6507,7 @@ unsafe fn __msa_maxi_u_w(a: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.d, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_maxi_u_d(a: u64x2, imm5: i32) -> u64x2 { +unsafe fn __msa_maxi_u_d(a: v2u64, imm5: i32) -> v2u64 { macro_rules! call { ($imm5:expr) => { msa_maxi_u_d(a, $imm5) @@ -6476,8 +6526,8 @@ unsafe fn __msa_maxi_u_d(a: u64x2, imm5: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.b))] -unsafe fn __msa_min_a_b(a: i8x16, b: i8x16) -> i8x16 { - msa_min_a_b(a, b) +unsafe fn __msa_min_a_b(a: v16i8, b: v16i8) -> v16i8 { + msa_min_a_b(a, ::mem::transmute(b)) } /// Vector Minimum Based on Absolute Value @@ -6490,8 +6540,8 @@ unsafe fn __msa_min_a_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.h))] -unsafe fn __msa_min_a_h(a: i16x8, b: i16x8) -> i16x8 { - msa_min_a_h(a, b) +unsafe fn __msa_min_a_h(a: v8i16, b: v8i16) -> v8i16 { + msa_min_a_h(a, ::mem::transmute(b)) } /// Vector Minimum Based on Absolute Value @@ -6504,8 +6554,8 @@ unsafe fn __msa_min_a_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.w))] -unsafe fn __msa_min_a_w(a: i32x4, b: i32x4) -> i32x4 { - msa_min_a_w(a, b) +unsafe fn __msa_min_a_w(a: v4i32, b: v4i32) -> v4i32 { + msa_min_a_w(a, ::mem::transmute(b)) } /// Vector Minimum Based on Absolute Value @@ -6518,8 +6568,8 @@ unsafe fn __msa_min_a_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.d))] -unsafe fn __msa_min_a_d(a: i64x2, b: i64x2) -> i64x2 { - msa_min_a_d(a, b) +unsafe fn __msa_min_a_d(a: v2i64, b: v2i64) -> v2i64 { + msa_min_a_d(a, ::mem::transmute(b)) } /// Vector Signed Minimum @@ -6531,8 +6581,8 @@ unsafe fn __msa_min_a_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.b))] -unsafe fn __msa_min_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_min_s_b(a, b) +unsafe fn __msa_min_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_min_s_b(a, ::mem::transmute(b)) } /// Vector Signed Minimum @@ -6544,8 +6594,8 @@ unsafe fn __msa_min_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.h))] -unsafe fn __msa_min_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_min_s_h(a, b) +unsafe fn __msa_min_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_min_s_h(a, ::mem::transmute(b)) } /// Vector Signed Minimum @@ -6557,8 +6607,8 @@ unsafe fn __msa_min_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.w))] -unsafe fn __msa_min_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_min_s_w(a, b) +unsafe fn __msa_min_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_min_s_w(a, ::mem::transmute(b)) } /// Vector Signed Minimum @@ -6570,8 +6620,8 @@ unsafe fn __msa_min_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.d))] -unsafe fn 
__msa_min_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_min_s_d(a, b) +unsafe fn __msa_min_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_min_s_d(a, ::mem::transmute(b)) } /// Immediate Signed Minimum @@ -6584,7 +6634,7 @@ unsafe fn __msa_min_s_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.b, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_s_b(a: i8x16, imm_s5: i32) -> i8x16 { +unsafe fn __msa_mini_s_b(a: v16i8, imm_s5: i32) -> v16i8 { macro_rules! call { ($imm_s5:expr) => { msa_mini_s_b(a, $imm_s5) @@ -6603,7 +6653,7 @@ unsafe fn __msa_mini_s_b(a: i8x16, imm_s5: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.h, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_s_h(a: i16x8, imm_s5: i32) -> i16x8 { +unsafe fn __msa_mini_s_h(a: v8i16, imm_s5: i32) -> v8i16 { macro_rules! call { ($imm_s5:expr) => { msa_mini_s_h(a, $imm_s5) @@ -6622,7 +6672,7 @@ unsafe fn __msa_mini_s_h(a: i16x8, imm_s5: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.w, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_s_w(a: i32x4, imm_s5: i32) -> i32x4 { +unsafe fn __msa_mini_s_w(a: v4i32, imm_s5: i32) -> v4i32 { macro_rules! call { ($imm_s5:expr) => { msa_mini_s_w(a, $imm_s5) @@ -6641,7 +6691,7 @@ unsafe fn __msa_mini_s_w(a: i32x4, imm_s5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.d, imm_s5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_s_d(a: i64x2, imm_s5: i32) -> i64x2 { +unsafe fn __msa_mini_s_d(a: v2i64, imm_s5: i32) -> v2i64 { macro_rules! call { ($imm_s5:expr) => { msa_mini_s_d(a, $imm_s5) @@ -6659,8 +6709,8 @@ unsafe fn __msa_mini_s_d(a: i64x2, imm_s5: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.b))] -unsafe fn __msa_min_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_min_u_b(a, b) +unsafe fn __msa_min_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_min_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Minimum @@ -6672,8 +6722,8 @@ unsafe fn __msa_min_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.h))] -unsafe fn __msa_min_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_min_u_h(a, b) +unsafe fn __msa_min_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_min_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Minimum @@ -6685,8 +6735,8 @@ unsafe fn __msa_min_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.w))] -unsafe fn __msa_min_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_min_u_w(a, b) +unsafe fn __msa_min_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_min_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Minimum @@ -6698,8 +6748,8 @@ unsafe fn __msa_min_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.d))] -unsafe fn __msa_min_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_min_u_d(a, b) +unsafe fn __msa_min_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_min_u_d(a, ::mem::transmute(b)) } /// Immediate Unsigned Minimum @@ -6712,7 +6762,7 @@ unsafe fn __msa_min_u_d(a: u64x2, b: u64x2) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.b, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_u_b(a: u8x16, imm5: i32) -> u8x16 { +unsafe fn __msa_mini_u_b(a: v16u8, imm5: i32) -> v16u8 { macro_rules! 
call { ($imm5:expr) => { msa_mini_u_b(a, $imm5) @@ -6731,7 +6781,7 @@ unsafe fn __msa_mini_u_b(a: u8x16, imm5: i32) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.h, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_u_h(a: u16x8, imm5: i32) -> u16x8 { +unsafe fn __msa_mini_u_h(a: v8u16, imm5: i32) -> v8u16 { macro_rules! call { ($imm5:expr) => { msa_mini_u_h(a, $imm5) @@ -6750,7 +6800,7 @@ unsafe fn __msa_mini_u_h(a: u16x8, imm5: i32) -> u16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_u_w(a: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_mini_u_w(a: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { msa_mini_u_w(a, $imm5) @@ -6769,7 +6819,7 @@ unsafe fn __msa_mini_u_w(a: u32x4, imm5: i32) -> u32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.d, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_mini_u_d(a: u64x2, imm5: i32) -> u64x2 { +unsafe fn __msa_mini_u_d(a: v2u64, imm5: i32) -> v2u64 { macro_rules! call { ($imm5:expr) => { msa_mini_u_d(a, $imm5) @@ -6789,8 +6839,8 @@ unsafe fn __msa_mini_u_d(a: u64x2, imm5: i32) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.b))] -unsafe fn __msa_mod_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_mod_s_b(a, b) +unsafe fn __msa_mod_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_mod_s_b(a, ::mem::transmute(b)) } /// Vector Signed Modulo @@ -6804,8 +6854,8 @@ unsafe fn __msa_mod_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.h))] -unsafe fn __msa_mod_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_mod_s_h(a, b) +unsafe fn __msa_mod_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_mod_s_h(a, ::mem::transmute(b)) } /// Vector Signed Modulo @@ -6819,8 +6869,8 @@ unsafe fn __msa_mod_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.w))] -unsafe fn __msa_mod_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_mod_s_w(a, b) +unsafe fn __msa_mod_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_mod_s_w(a, ::mem::transmute(b)) } /// Vector Signed Modulo @@ -6834,8 +6884,8 @@ unsafe fn __msa_mod_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.d))] -unsafe fn __msa_mod_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_mod_s_d(a, b) +unsafe fn __msa_mod_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_mod_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Modulo @@ -6849,8 +6899,8 @@ unsafe fn __msa_mod_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.b))] -unsafe fn __msa_mod_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_mod_u_b(a, b) +unsafe fn __msa_mod_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_mod_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Modulo @@ -6864,8 +6914,8 @@ unsafe fn __msa_mod_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.h))] -unsafe fn __msa_mod_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_mod_u_h(a, b) +unsafe fn __msa_mod_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_mod_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Modulo @@ -6879,8 +6929,8 @@ unsafe fn __msa_mod_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.w))] -unsafe fn __msa_mod_u_w(a: u32x4, b: 
u32x4) -> u32x4 { - msa_mod_u_w(a, b) +unsafe fn __msa_mod_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_mod_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Modulo @@ -6894,8 +6944,8 @@ unsafe fn __msa_mod_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.d))] -unsafe fn __msa_mod_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_mod_u_d(a, b) +unsafe fn __msa_mod_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_mod_u_d(a, ::mem::transmute(b)) } /// Vector Move @@ -6906,7 +6956,7 @@ unsafe fn __msa_mod_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(move.v))] -unsafe fn __msa_move_v(a: i8x16) -> i8x16 { +unsafe fn __msa_move_v(a: v16i8) -> v16i8 { msa_move_v(a) } @@ -6922,8 +6972,8 @@ unsafe fn __msa_move_v(a: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msub_q.h))] -unsafe fn __msa_msub_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { - msa_msub_q_h(a, b, c) +unsafe fn __msa_msub_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + msa_msub_q_h(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply and Subtract @@ -6938,8 +6988,8 @@ unsafe fn __msa_msub_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msub_q.w))] -unsafe fn __msa_msub_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { - msa_msub_q_w(a, b, c) +unsafe fn __msa_msub_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + msa_msub_q_w(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply and Subtract Rounded @@ -6954,8 +7004,8 @@ unsafe fn __msa_msub_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubr_q.h))] -unsafe fn __msa_msubr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { - msa_msubr_q_h(a, b, c) +unsafe fn __msa_msubr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + msa_msubr_q_h(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply and Subtract Rounded @@ -6970,8 +7020,8 @@ unsafe fn __msa_msubr_q_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubr_q.w))] -unsafe fn __msa_msubr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { - msa_msubr_q_w(a, b, c) +unsafe fn __msa_msubr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + msa_msubr_q_w(a, ::mem::transmute(b), c) } /// Vector Multiply and Subtract @@ -6984,8 +7034,8 @@ unsafe fn __msa_msubr_q_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.b))] -unsafe fn __msa_msubv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 { - msa_msubv_b(a, b, c) +unsafe fn __msa_msubv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + msa_msubv_b(a, ::mem::transmute(b), c) } /// Vector Multiply and Subtract @@ -6998,8 +7048,8 @@ unsafe fn __msa_msubv_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.h))] -unsafe fn __msa_msubv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { - msa_msubv_h(a, b, c) +unsafe fn __msa_msubv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + msa_msubv_h(a, ::mem::transmute(b), c) } /// Vector Multiply and Subtract @@ -7012,8 +7062,8 @@ unsafe fn __msa_msubv_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.w))] -unsafe fn __msa_msubv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { - msa_msubv_w(a, b, c) +unsafe fn __msa_msubv_w(a: 
v4i32, b: v4i32, c: v4i32) -> v4i32 { + msa_msubv_w(a, ::mem::transmute(b), c) } /// Vector Multiply and Subtract @@ -7026,8 +7076,8 @@ unsafe fn __msa_msubv_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.d))] -unsafe fn __msa_msubv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 { - msa_msubv_d(a, b, c) +unsafe fn __msa_msubv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + msa_msubv_d(a, ::mem::transmute(b), c) } /// Vector Fixed-Point Multiply @@ -7039,8 +7089,8 @@ unsafe fn __msa_msubv_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mul_q.h))] -unsafe fn __msa_mul_q_h(a: i16x8, b: i16x8) -> i16x8 { - msa_mul_q_h(a, b) +unsafe fn __msa_mul_q_h(a: v8i16, b: v8i16) -> v8i16 { + msa_mul_q_h(a, ::mem::transmute(b)) } /// Vector Fixed-Point Multiply @@ -7052,8 +7102,8 @@ unsafe fn __msa_mul_q_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mul_q.w))] -unsafe fn __msa_mul_q_w(a: i32x4, b: i32x4) -> i32x4 { - msa_mul_q_w(a, b) +unsafe fn __msa_mul_q_w(a: v4i32, b: v4i32) -> v4i32 { + msa_mul_q_w(a, ::mem::transmute(b)) } /// Vector Fixed-Point Multiply Rounded @@ -7065,8 +7115,8 @@ unsafe fn __msa_mul_q_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulr_q.h))] -unsafe fn __msa_mulr_q_h(a: i16x8, b: i16x8) -> i16x8 { - msa_mulr_q_h(a, b) +unsafe fn __msa_mulr_q_h(a: v8i16, b: v8i16) -> v8i16 { + msa_mulr_q_h(a, ::mem::transmute(b)) } /// Vector Fixed-Point Multiply Rounded @@ -7078,8 +7128,8 @@ unsafe fn __msa_mulr_q_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulr_q.w))] -unsafe fn __msa_mulr_q_w(a: i32x4, b: i32x4) -> i32x4 { - msa_mulr_q_w(a, b) +unsafe fn __msa_mulr_q_w(a: v4i32, b: v4i32) -> v4i32 { + msa_mulr_q_w(a, ::mem::transmute(b)) } /// Vector Multiply @@ -7092,8 +7142,8 @@ unsafe fn __msa_mulr_q_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.b))] -unsafe fn __msa_mulv_b(a: i8x16, b: i8x16) -> i8x16 { - msa_mulv_b(a, b) +unsafe fn __msa_mulv_b(a: v16i8, b: v16i8) -> v16i8 { + msa_mulv_b(a, ::mem::transmute(b)) } /// Vector Multiply @@ -7106,8 +7156,8 @@ unsafe fn __msa_mulv_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.h))] -unsafe fn __msa_mulv_h(a: i16x8, b: i16x8) -> i16x8 { - msa_mulv_h(a, b) +unsafe fn __msa_mulv_h(a: v8i16, b: v8i16) -> v8i16 { + msa_mulv_h(a, ::mem::transmute(b)) } /// Vector Multiply @@ -7120,8 +7170,8 @@ unsafe fn __msa_mulv_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.w))] -unsafe fn __msa_mulv_w(a: i32x4, b: i32x4) -> i32x4 { - msa_mulv_w(a, b) +unsafe fn __msa_mulv_w(a: v4i32, b: v4i32) -> v4i32 { + msa_mulv_w(a, ::mem::transmute(b)) } /// Vector Multiply @@ -7134,8 +7184,8 @@ unsafe fn __msa_mulv_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.d))] -unsafe fn __msa_mulv_d(a: i64x2, b: i64x2) -> i64x2 { - msa_mulv_d(a, b) +unsafe fn __msa_mulv_d(a: v2i64, b: v2i64) -> v2i64 { + msa_mulv_d(a, ::mem::transmute(b)) } /// Vector Leading Ones Count @@ -7146,7 +7196,7 @@ unsafe fn __msa_mulv_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(nloc.b))] -unsafe fn __msa_nloc_b(a: i8x16) -> i8x16 { +unsafe fn __msa_nloc_b(a: v16i8) -> v16i8 { msa_nloc_b(a) } @@ -7158,7 +7208,7 @@ unsafe fn __msa_nloc_b(a: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.h))] -unsafe fn __msa_nloc_h(a: i16x8) -> i16x8 { +unsafe fn __msa_nloc_h(a: v8i16) -> v8i16 { msa_nloc_h(a) } @@ -7170,7 +7220,7 @@ unsafe fn __msa_nloc_h(a: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.w))] -unsafe fn __msa_nloc_w(a: i32x4) -> i32x4 { +unsafe fn __msa_nloc_w(a: v4i32) -> v4i32 { msa_nloc_w(a) } @@ -7182,7 +7232,7 @@ unsafe fn __msa_nloc_w(a: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.d))] -unsafe fn __msa_nloc_d(a: i64x2) -> i64x2 { +unsafe fn __msa_nloc_d(a: v2i64) -> v2i64 { msa_nloc_d(a) } @@ -7194,7 +7244,7 @@ unsafe fn __msa_nloc_d(a: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.b))] -unsafe fn __msa_nlzc_b(a: i8x16) -> i8x16 { +unsafe fn __msa_nlzc_b(a: v16i8) -> v16i8 { msa_nlzc_b(a) } @@ -7206,7 +7256,7 @@ unsafe fn __msa_nlzc_b(a: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.h))] -unsafe fn __msa_nlzc_h(a: i16x8) -> i16x8 { +unsafe fn __msa_nlzc_h(a: v8i16) -> v8i16 { msa_nlzc_h(a) } @@ -7218,7 +7268,7 @@ unsafe fn __msa_nlzc_h(a: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.w))] -unsafe fn __msa_nlzc_w(a: i32x4) -> i32x4 { +unsafe fn __msa_nlzc_w(a: v4i32) -> v4i32 { msa_nlzc_w(a) } @@ -7230,7 +7280,7 @@ unsafe fn __msa_nlzc_w(a: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.d))] -unsafe fn __msa_nlzc_d(a: i64x2) -> i64x2 { +unsafe fn __msa_nlzc_d(a: v2i64) -> v2i64 { msa_nlzc_d(a) } @@ -7244,8 +7294,8 @@ unsafe fn __msa_nlzc_d(a: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nor.v))] -unsafe fn __msa_nor_v(a: u8x16, b: u8x16) -> u8x16 { - msa_nor_v(a, b) +unsafe fn __msa_nor_v(a: v16u8, b: v16u8) -> v16u8 { + msa_nor_v(a, ::mem::transmute(b)) } /// Immediate Logical Negated Or @@ -7259,7 +7309,7 @@ unsafe fn __msa_nor_v(a: u8x16, b: u8x16) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nori.b, imm8 = 0b11111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_nori_b(a: u8x16, imm8: i32) -> u8x16 { +unsafe fn __msa_nori_b(a: v16u8, imm8: i32) -> v16u8 { macro_rules! call { ($imm8:expr) => { msa_nori_b(a, $imm8) @@ -7278,8 +7328,8 @@ unsafe fn __msa_nori_b(a: u8x16, imm8: i32) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(or.v))] -unsafe fn __msa_or_v(a: u8x16, b: u8x16) -> u8x16 { - msa_or_v(a, b) +unsafe fn __msa_or_v(a: v16u8, b: v16u8) -> v16u8 { + msa_or_v(a, ::mem::transmute(b)) } /// Immediate Logical Or @@ -7293,7 +7343,7 @@ unsafe fn __msa_or_v(a: u8x16, b: u8x16) -> u8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ori.b, imm8 = 0b11111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_ori_b(a: u8x16, imm8: i32) -> u8x16 { +unsafe fn __msa_ori_b(a: v16u8, imm8: i32) -> v16u8 { macro_rules! 
call { ($imm8:expr) => { msa_ori_b(a, $imm8) @@ -7304,167 +7354,167 @@ unsafe fn __msa_ori_b(a: u8x16, imm8: i32) -> u8x16 { /// Vector Pack Even /// -/// Even elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// Even elements in vectors 'a' (sixteen signed 8-bit integer numbers) /// are copied to the left half of the result vector and even elements in vector 'b' /// (sixteen signed 8-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.b))] -unsafe fn __msa_pckev_b(a: i8x16, b: i8x16) -> i8x16 { - msa_pckev_b(a, b) +unsafe fn __msa_pckev_b(a: v16i8, b: v16i8) -> v16i8 { + msa_pckev_b(a, ::mem::transmute(b)) } /// Vector Pack Even /// -/// Even elements in vectors 'a' (eight signed 16-bit integer numbers) +/// Even elements in vectors 'a' (eight signed 16-bit integer numbers) /// are copied to the left half of the result vector and even elements in vector 'b' /// (eight signed 16-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.h))] -unsafe fn __msa_pckev_h(a: i16x8, b: i16x8) -> i16x8 { - msa_pckev_h(a, b) +unsafe fn __msa_pckev_h(a: v8i16, b: v8i16) -> v8i16 { + msa_pckev_h(a, ::mem::transmute(b)) } /// Vector Pack Even /// -/// Even elements in vectors 'a' (four signed 32-bit integer numbers) +/// Even elements in vectors 'a' (four signed 32-bit integer numbers) /// are copied to the left half of the result vector and even elements in vector 'b' /// (four signed 32-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.w))] -unsafe fn __msa_pckev_w(a: i32x4, b: i32x4) -> i32x4 { - msa_pckev_w(a, b) +unsafe fn __msa_pckev_w(a: v4i32, b: v4i32) -> v4i32 { + msa_pckev_w(a, ::mem::transmute(b)) } /// Vector Pack Even /// -/// Even elements in vectors 'a' (two signed 64-bit integer numbers) +/// Even elements in vectors 'a' (two signed 64-bit integer numbers) /// are copied to the left half of the result vector and even elements in vector 'b' /// (two signed 64-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.d))] -unsafe fn __msa_pckev_d(a: i64x2, b: i64x2) -> i64x2 { - msa_pckev_d(a, b) +unsafe fn __msa_pckev_d(a: v2i64, b: v2i64) -> v2i64 { + msa_pckev_d(a, ::mem::transmute(b)) } /// Vector Pack Odd /// -/// Odd elements in vectors 'a' (sixteen signed 8-bit integer numbers) +/// Odd elements in vectors 'a' (sixteen signed 8-bit integer numbers) /// are copied to the left half of the result vector and odd elements in vector 'b' /// (sixteen signed 8-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.b))] -unsafe fn __msa_pckod_b(a: i8x16, b: i8x16) -> i8x16 { - msa_pckod_b(a, b) +unsafe fn __msa_pckod_b(a: v16i8, b: v16i8) -> v16i8 { + msa_pckod_b(a, ::mem::transmute(b)) } /// Vector Pack Odd /// -/// Odd elements in vectors 'a' (eight signed 16-bit integer numbers) +/// Odd elements in vectors 'a' (eight signed 16-bit integer numbers) /// are copied to the left half of the result vector and odd elements in vector 'b' /// (eight signed 16-bit integer numbers) are copied to the right half of the result vector. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.h))] -unsafe fn __msa_pckod_h(a: i16x8, b: i16x8) -> i16x8 { - msa_pckod_h(a, b) +unsafe fn __msa_pckod_h(a: v8i16, b: v8i16) -> v8i16 { + msa_pckod_h(a, ::mem::transmute(b)) } /// Vector Pack Odd /// -/// Odd elements in vectors 'a' (four signed 32-bit integer numbers) +/// Odd elements in vectors 'a' (four signed 32-bit integer numbers) /// are copied to the left half of the result vector and odd elements in vector 'b' /// (four signed 32-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.w))] -unsafe fn __msa_pckod_w(a: i32x4, b: i32x4) -> i32x4 { - msa_pckod_w(a, b) +unsafe fn __msa_pckod_w(a: v4i32, b: v4i32) -> v4i32 { + msa_pckod_w(a, ::mem::transmute(b)) } /// Vector Pack Odd /// -/// Odd elements in vectors 'a' (two signed 64-bit integer numbers) +/// Odd elements in vectors 'a' (two signed 64-bit integer numbers) /// are copied to the left half of the result vector and odd elements in vector 'b' /// (two signed 64-bit integer numbers) are copied to the right half of the result vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.d))] -unsafe fn __msa_pckod_d(a: i64x2, b: i64x2) -> i64x2 { - msa_pckod_d(a, b) +unsafe fn __msa_pckod_d(a: v2i64, b: v2i64) -> v2i64 { + msa_pckod_d(a, ::mem::transmute(b)) } /// Vector Population Count /// -/// The number of bits set to 1 for elements in vector 'a' (sixteen signed 8-bit integer numbers) -/// is stored to the elements in the result vector (sixteen signed 8-bit integer numbers) +/// The number of bits set to 1 for elements in vector 'a' (sixteen signed 8-bit integer numbers) +/// is stored to the elements in the result vector (sixteen signed 8-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.b))] -unsafe fn __msa_pcnt_b(a: i8x16) -> i8x16 { +unsafe fn __msa_pcnt_b(a: v16i8) -> v16i8 { msa_pcnt_b(a) } /// Vector Population Count /// -/// The number of bits set to 1 for elements in vector 'a' (eight signed 16-bit integer numbers) -/// is stored to the elements in the result vector (eight signed 16-bit integer numbers) +/// The number of bits set to 1 for elements in vector 'a' (eight signed 16-bit integer numbers) +/// is stored to the elements in the result vector (eight signed 16-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.h))] -unsafe fn __msa_pcnt_h(a: i16x8) -> i16x8 { +unsafe fn __msa_pcnt_h(a: v8i16) -> v8i16 { msa_pcnt_h(a) } /// Vector Population Count /// -/// The number of bits set to 1 for elements in vector 'a' (four signed 32-bit integer numbers) -/// is stored to the elements in the result vector (four signed 32-bit integer numbers) +/// The number of bits set to 1 for elements in vector 'a' (four signed 32-bit integer numbers) +/// is stored to the elements in the result vector (four signed 32-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.w))] -unsafe fn __msa_pcnt_w(a: i32x4) -> i32x4 { +unsafe fn __msa_pcnt_w(a: v4i32) -> v4i32 { msa_pcnt_w(a) } /// Vector Population Count /// -/// The number of bits set to 1 for elements in vector 'a' (two signed 64-bit integer numbers) -/// is stored to the elements in the result vector (two signed 64-bit integer numbers) +/// The number of bits set to 1 for elements in vector 'a' (two signed 
64-bit integer numbers) +/// is stored to the elements in the result vector (two signed 64-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.d))] -unsafe fn __msa_pcnt_d(a: i64x2) -> i64x2 { +unsafe fn __msa_pcnt_d(a: v2i64) -> v2i64 { msa_pcnt_d(a) } /// Immediate Signed Saturate /// -/// Signed elements in vector 'a' (sixteen signed 8-bit integer numbers) +/// Signed elements in vector 'a' (sixteen signed 8-bit integer numbers) /// are saturated to signed values of imm3+1 bits without changing the data width -/// The result is stored in the vector (sixteen signed 8-bit integer numbers) +/// The result is stored in the vector (sixteen signed 8-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_s_b(a: i8x16, imm3: i32) -> i8x16 { +unsafe fn __msa_sat_s_b(a: v16i8, imm3: i32) -> v16i8 { macro_rules! call { ($imm3:expr) => { msa_sat_s_b(a, $imm3) @@ -7475,15 +7525,15 @@ unsafe fn __msa_sat_s_b(a: i8x16, imm3: i32) -> i8x16 { /// Immediate Signed Saturate /// -/// Signed elements in vector 'a' (eight signed 16-bit integer numbers) +/// Signed elements in vector 'a' (eight signed 16-bit integer numbers) /// are saturated to signed values of imm4+1 bits without changing the data width -/// The result is stored in the vector (eight signed 16-bit integer numbers) +/// The result is stored in the vector (eight signed 16-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_s_h(a: i16x8, imm4: i32) -> i16x8 { +unsafe fn __msa_sat_s_h(a: v8i16, imm4: i32) -> v8i16 { macro_rules! call { ($imm4:expr) => { msa_sat_s_h(a, $imm4) @@ -7494,15 +7544,15 @@ unsafe fn __msa_sat_s_h(a: i16x8, imm4: i32) -> i16x8 { /// Immediate Signed Saturate /// -/// Signed elements in vector 'a' (four signed 32-bit integer numbers) +/// Signed elements in vector 'a' (four signed 32-bit integer numbers) /// are saturated to signed values of imm5+1 bits without changing the data width -/// The result is stored in the vector (four signed 32-bit integer numbers) +/// The result is stored in the vector (four signed 32-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_s_w(a: i32x4, imm5: i32) -> i32x4 { +unsafe fn __msa_sat_s_w(a: v4i32, imm5: i32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_sat_s_w(a, $imm5) @@ -7513,15 +7563,15 @@ unsafe fn __msa_sat_s_w(a: i32x4, imm5: i32) -> i32x4 { /// Immediate Signed Saturate /// -/// Signed elements in vector 'a' (two signed 64-bit integer numbers) +/// Signed elements in vector 'a' (two signed 64-bit integer numbers) /// are saturated to signed values of imm6+1 bits without changing the data width -/// The result is stored in the vector (two signed 64-bit integer numbers) +/// The result is stored in the vector (two signed 64-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_s_d(a: i64x2, imm6: i32) -> i64x2 { +unsafe fn __msa_sat_s_d(a: v2i64, imm6: i32) -> v2i64 { macro_rules! 
call { ($imm6:expr) => { msa_sat_s_d(a, $imm6) @@ -7532,15 +7582,15 @@ unsafe fn __msa_sat_s_d(a: i64x2, imm6: i32) -> i64x2 { /// Immediate Unsigned Saturate /// -/// Unsigned elements in vector 'a' (sixteen unsigned 8-bit integer numbers) +/// Unsigned elements in vector 'a' (sixteen unsigned 8-bit integer numbers) /// are saturated to unsigned values of imm3+1 bits without changing the data width -/// The result is stored in the vector (sixteen unsigned 8-bit integer numbers) +/// The result is stored in the vector (sixteen unsigned 8-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_u_b(a: u8x16, imm3: i32) -> u8x16 { +unsafe fn __msa_sat_u_b(a: v16u8, imm3: i32) -> v16u8 { macro_rules! call { ($imm3:expr) => { msa_sat_u_b(a, $imm3) @@ -7551,15 +7601,15 @@ unsafe fn __msa_sat_u_b(a: u8x16, imm3: i32) -> u8x16 { /// Immediate Unsigned Saturate /// -/// Unsigned elements in vector 'a' (eight unsigned 16-bit integer numbers) +/// Unsigned elements in vector 'a' (eight unsigned 16-bit integer numbers) /// are saturated to unsigned values of imm4+1 bits without changing the data width -/// The result is stored in the vector (eight unsigned 16-bit integer numbers) +/// The result is stored in the vector (eight unsigned 16-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_u_h(a: u16x8, imm4: i32) -> u16x8 { +unsafe fn __msa_sat_u_h(a: v8u16, imm4: i32) -> v8u16 { macro_rules! call { ($imm4:expr) => { msa_sat_u_h(a, $imm4) @@ -7570,15 +7620,15 @@ unsafe fn __msa_sat_u_h(a: u16x8, imm4: i32) -> u16x8 { /// Immediate Unsigned Saturate /// -/// Unsigned elements in vector 'a' (four unsigned 32-bit integer numbers) +/// Unsigned elements in vector 'a' (four unsigned 32-bit integer numbers) /// are saturated to unsigned values of imm5+1 bits without changing the data width -/// The result is stored in the vector (four unsigned 32-bit integer numbers) +/// The result is stored in the vector (four unsigned 32-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_u_w(a: u32x4, imm5: i32) -> u32x4 { +unsafe fn __msa_sat_u_w(a: v4u32, imm5: i32) -> v4u32 { macro_rules! call { ($imm5:expr) => { msa_sat_u_w(a, $imm5) @@ -7589,15 +7639,15 @@ unsafe fn __msa_sat_u_w(a: u32x4, imm5: i32) -> u32x4 { /// Immediate Unsigned Saturate /// -/// Unsigned elements in vector 'a' (two unsigned 64-bit integer numbers) +/// Unsigned elements in vector 'a' (two unsigned 64-bit integer numbers) /// are saturated to unsigned values of imm6+1 bits without changing the data width -/// The result is stored in the vector (two unsigned 64-bit integer numbers) +/// The result is stored in the vector (two unsigned 64-bit integer numbers) /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_sat_u_d(a: u64x2, imm6: i32) -> u64x2 { +unsafe fn __msa_sat_u_d(a: v2u64, imm6: i32) -> v2u64 { macro_rules! call { ($imm6:expr) => { msa_sat_u_d(a, $imm6) @@ -7608,7 +7658,7 @@ unsafe fn __msa_sat_u_d(a: u64x2, imm6: i32) -> u64x2 { /// Immediate Set Shuffle Elements /// -/// The set shuffle instruction works on 4-element sets. 
+/// The set shuffle instruction works on 4-element sets. /// All sets are shuffled in the same way: the element i8[2i+1..2i] in 'a' /// (sixteen signed 8-bit integer numbers) is copied over the element i in result vector /// (sixteen signed 8-bit integer numbers), where i is 0, 1, 2, 3. @@ -7617,7 +7667,7 @@ unsafe fn __msa_sat_u_d(a: u64x2, imm6: i32) -> u64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(shf.b, imm8 = 0b11111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_shf_b(a: i8x16, imm8: i32) -> i8x16 { +unsafe fn __msa_shf_b(a: v16i8, imm8: i32) -> v16i8 { macro_rules! call { ($imm8:expr) => { msa_shf_b(a, $imm8) @@ -7628,7 +7678,7 @@ unsafe fn __msa_shf_b(a: i8x16, imm8: i32) -> i8x16 { /// Immediate Set Shuffle Elements /// -/// The set shuffle instruction works on 4-element sets. +/// The set shuffle instruction works on 4-element sets. /// All sets are shuffled in the same way: the element i8[2i+1..2i] in 'a' /// (eight signed 16-bit integer numbers) is copied over the element i in result vector /// (eight signed 16-bit integer numbers), where i is 0, 1, 2, 3. @@ -7637,7 +7687,7 @@ unsafe fn __msa_shf_b(a: i8x16, imm8: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(shf.h, imm8 = 0b11111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_shf_h(a: i16x8, imm8: i32) -> i16x8 { +unsafe fn __msa_shf_h(a: v8i16, imm8: i32) -> v8i16 { macro_rules! call { ($imm8:expr) => { msa_shf_h(a, $imm8) @@ -7648,7 +7698,7 @@ unsafe fn __msa_shf_h(a: i16x8, imm8: i32) -> i16x8 { /// Immediate Set Shuffle Elements /// -/// The set shuffle instruction works on 4-element sets. +/// The set shuffle instruction works on 4-element sets. /// All sets are shuffled in the same way: the element i8[2i+1..2i] in 'a' /// (four signed 32-bit integer numbers) is copied over the element i in result vector /// (four signed 32-bit integer numbers), where i is 0, 1, 2, 3. @@ -7657,7 +7707,7 @@ unsafe fn __msa_shf_h(a: i16x8, imm8: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(shf.w, imm8 = 0b11111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_shf_w(a: i32x4, imm8: i32) -> i32x4 { +unsafe fn __msa_shf_w(a: v4i32, imm8: i32) -> v4i32 { macro_rules! call { ($imm8:expr) => { msa_shf_w(a, $imm8) @@ -7674,7 +7724,7 @@ unsafe fn __msa_shf_w(a: i32x4, imm8: i32) -> i32x4 { /// The two source rectangles 'b' and 'a' are concatenated horizontally in the order /// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination /// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b' -/// by the number of columns given in GPR 'c'. +/// by the number of columns given in GPR 'c'. /// The result is written to vector (sixteen signed 8-bit integer numbers). /// GPR 'c' value is interpreted modulo the number of columns in destination rectangle, /// or equivalently, the number of data format df elements in the destination vector. 
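The shf selection rule above is the same for every element width: result element i of each aligned 4-element set takes the source element whose index is given by bits 2i+1..2i of the 8-bit immediate. A minimal scalar model of one 4-element set, for reference only (the helper names below are illustrative and not part of this patch):

    // Model of the shf.w selection rule: each 2-bit field of imm8
    // picks the source element for one result lane.
    fn shf_w_model(a: [i32; 4], imm8: u8) -> [i32; 4] {
        let mut r = [0i32; 4];
        for i in 0..4 {
            let sel = ((imm8 >> (2 * i)) & 0b11) as usize;
            r[i] = a[sel];
        }
        r
    }

    fn main() {
        // imm8 = 0b00_01_10_11 selects elements 3, 2, 1, 0, i.e. a reversal.
        assert_eq!(shf_w_model([10, 20, 30, 40], 0b00_01_10_11), [40, 30, 20, 10]);
    }

For shf.b and shf.h the same rule is applied independently to every aligned 4-element set in the vector.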
@@ -7682,8 +7732,8 @@ unsafe fn __msa_shf_w(a: i32x4, imm8: i32) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.b))] -unsafe fn __msa_sld_b(a: i8x16, b: i8x16, c: i32) -> i8x16 { - msa_sld_b(a, b, c) +unsafe fn __msa_sld_b(a: v16i8, b: v16i8, c: i32) -> v16i8 { + msa_sld_b(a, ::mem::transmute(b), c) } /// GPR Columns Slide @@ -7694,7 +7744,7 @@ unsafe fn __msa_sld_b(a: i8x16, b: i8x16, c: i32) -> i8x16 { /// The two source rectangles 'b' and 'a' are concatenated horizontally in the order /// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination /// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b' -/// by the number of columns given in GPR 'c'. +/// by the number of columns given in GPR 'c'. /// The result is written to vector (eight signed 16-bit integer numbers). /// GPR 'c' value is interpreted modulo the number of columns in destination rectangle, /// or equivalently, the number of data format df elements in the destination vector. @@ -7702,8 +7752,8 @@ unsafe fn __msa_sld_b(a: i8x16, b: i8x16, c: i32) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.h))] -unsafe fn __msa_sld_h(a: i16x8, b: i16x8, c: i32) -> i16x8 { - msa_sld_h(a, b, c) +unsafe fn __msa_sld_h(a: v8i16, b: v8i16, c: i32) -> v8i16 { + msa_sld_h(a, ::mem::transmute(b), c) } /// GPR Columns Slide @@ -7714,7 +7764,7 @@ unsafe fn __msa_sld_h(a: i16x8, b: i16x8, c: i32) -> i16x8 { /// The two source rectangles 'b' and 'a' are concatenated horizontally in the order /// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination /// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b' -/// by the number of columns given in GPR 'c'. +/// by the number of columns given in GPR 'c'. /// The result is written to vector (four signed 32-bit integer numbers). /// GPR 'c' value is interpreted modulo the number of columns in destination rectangle, /// or equivalently, the number of data format df elements in the destination vector. @@ -7722,8 +7772,8 @@ unsafe fn __msa_sld_h(a: i16x8, b: i16x8, c: i32) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.w))] -unsafe fn __msa_sld_w(a: i32x4, b: i32x4, c: i32) -> i32x4 { - msa_sld_w(a, b, c) +unsafe fn __msa_sld_w(a: v4i32, b: v4i32, c: i32) -> v4i32 { + msa_sld_w(a, ::mem::transmute(b), c) } /// GPR Columns Slide @@ -7734,7 +7784,7 @@ unsafe fn __msa_sld_w(a: i32x4, b: i32x4, c: i32) -> i32x4 { /// The two source rectangles 'b' and 'a' are concatenated horizontally in the order /// they appear in the syntax, i.e. first 'a' and then 'b'. Place a new destination /// rectangle over 'b' and then slide it to the left over the concatenation of 'a' and 'b' -/// by the number of columns given in GPR 'c'. +/// by the number of columns given in GPR 'c'. /// The result is written to vector (two signed 64-bit integer numbers). /// GPR 'c' value is interpreted modulo the number of columns in destination rectangle, /// or equivalently, the number of data format df elements in the destination vector. 
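Reading the sld description above for the byte format (sld.b, a single row of sixteen one-byte columns), the operation amounts to taking a 16-byte window over the concatenation [a | b] that starts over 'b' and is slid left by c columns, with c reduced modulo 16. A sketch of that reading, under those assumptions only (the helper name is hypothetical, and wider formats use fewer columns, e.g. two 8-byte columns for sld.d, so their 'c' is reduced modulo a smaller count):

    // One-row model of sld.b derived from the doc comment: the window
    // starts over 'b' and moves left by c columns, c taken modulo 16.
    fn sld_b_model(a: [i8; 16], b: [i8; 16], c: usize) -> [i8; 16] {
        let c = c % 16;
        let mut concat = [0i8; 32];
        concat[..16].copy_from_slice(&a);
        concat[16..].copy_from_slice(&b);
        let mut r = [0i8; 16];
        for i in 0..16 {
            r[i] = concat[16 - c + i];
        }
        r
    }

Because 'c' is interpreted modulo the column count, sld.b with c = 18 selects the same window as c = 2, and c = 0 simply yields 'b'.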
@@ -7742,8 +7792,8 @@ unsafe fn __msa_sld_w(a: i32x4, b: i32x4, c: i32) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.d))] -unsafe fn __msa_sld_d(a: i64x2, b: i64x2, c: i32) -> i64x2 { - msa_sld_d(a, b, c) +unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 { + msa_sld_d(a, ::mem::transmute(b), c) } /// Immediate Columns Slide @@ -7761,10 +7811,10 @@ unsafe fn __msa_sld_d(a: i64x2, b: i64x2, c: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))] #[rustc_args_required_const(2)] -unsafe fn __msa_sldi_b(a: i8x16, b:i8x16, imm4: i32) -> i8x16 { +unsafe fn __msa_sldi_b(a: v16i8, b: v16i8, imm4: i32) -> v16i8 { macro_rules! call { ($imm4:expr) => { - msa_sldi_b(a, b, $imm4) + msa_sldi_b(a, ::mem::transmute(b), $imm4) }; } constify_imm4!(imm4, call) @@ -7785,10 +7835,10 @@ unsafe fn __msa_sldi_b(a: i8x16, b:i8x16, imm4: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))] #[rustc_args_required_const(2)] -unsafe fn __msa_sldi_h(a: i16x8, b:i16x8, imm3: i32) -> i16x8 { +unsafe fn __msa_sldi_h(a: v8i16, b: v8i16, imm3: i32) -> v8i16 { macro_rules! call { ($imm3:expr) => { - msa_sldi_h(a, b, $imm3) + msa_sldi_h(a, ::mem::transmute(b), $imm3) }; } constify_imm3!(imm3, call) @@ -7809,10 +7859,10 @@ unsafe fn __msa_sldi_h(a: i16x8, b:i16x8, imm3: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))] #[rustc_args_required_const(2)] -unsafe fn __msa_sldi_w(a: i32x4, b:i32x4, imm2: i32) -> i32x4 { +unsafe fn __msa_sldi_w(a: v4i32, b: v4i32, imm2: i32) -> v4i32 { macro_rules! call { ($imm2:expr) => { - msa_sldi_w(a, b, $imm2) + msa_sldi_w(a, ::mem::transmute(b), $imm2) }; } constify_imm2!(imm2, call) @@ -7833,10 +7883,10 @@ unsafe fn __msa_sldi_w(a: i32x4, b:i32x4, imm2: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))] #[rustc_args_required_const(2)] -unsafe fn __msa_sldi_d(a: i64x2, b:i64x2, imm1: i32) -> i64x2 { +unsafe fn __msa_sldi_d(a: v2i64, b: v2i64, imm1: i32) -> v2i64 { macro_rules! 
call { ($imm1:expr) => { - msa_sldi_d(a, b, $imm1) + msa_sldi_d(a, ::mem::transmute(b), $imm1) }; } constify_imm1!(imm1, call) @@ -7852,8 +7902,8 @@ unsafe fn __msa_sldi_d(a: i64x2, b:i64x2, imm1: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.b))] -unsafe fn __msa_sll_b(a: i8x16, b: i8x16) -> i8x16 { - msa_sll_b(a, b) +unsafe fn __msa_sll_b(a: v16i8, b: v16i8) -> v16i8 { + msa_sll_b(a, ::mem::transmute(b)) } /// Vector Shift Left @@ -7866,8 +7916,8 @@ unsafe fn __msa_sll_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.h))] -unsafe fn __msa_sll_h(a: i16x8, b: i16x8) -> i16x8 { - msa_sll_h(a, b) +unsafe fn __msa_sll_h(a: v8i16, b: v8i16) -> v8i16 { + msa_sll_h(a, ::mem::transmute(b)) } /// Vector Shift Left @@ -7880,8 +7930,8 @@ unsafe fn __msa_sll_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.w))] -unsafe fn __msa_sll_w(a: i32x4, b: i32x4) -> i32x4 { - msa_sll_w(a, b) +unsafe fn __msa_sll_w(a: v4i32, b: v4i32) -> v4i32 { + msa_sll_w(a, ::mem::transmute(b)) } /// Vector Shift Left @@ -7894,8 +7944,8 @@ unsafe fn __msa_sll_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.d))] -unsafe fn __msa_sll_d(a: i64x2, b: i64x2) -> i64x2 { - msa_sll_d(a, b) +unsafe fn __msa_sll_d(a: v2i64, b: v2i64) -> v2i64 { + msa_sll_d(a, ::mem::transmute(b)) } /// Immediate Shift Left @@ -7908,7 +7958,7 @@ unsafe fn __msa_sll_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_slli_b(a: i8x16, imm4: i32) -> i8x16 { +unsafe fn __msa_slli_b(a: v16i8, imm4: i32) -> v16i8 { macro_rules! call { ($imm4:expr) => { msa_slli_b(a, $imm4) @@ -7927,7 +7977,7 @@ unsafe fn __msa_slli_b(a: i8x16, imm4: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_slli_h(a: i16x8, imm3: i32) -> i16x8 { +unsafe fn __msa_slli_h(a: v8i16, imm3: i32) -> v8i16 { macro_rules! call { ($imm3:expr) => { msa_slli_h(a, $imm3) @@ -7946,7 +7996,7 @@ unsafe fn __msa_slli_h(a: i16x8, imm3: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_slli_w(a: i32x4, imm2: i32) -> i32x4 { +unsafe fn __msa_slli_w(a: v4i32, imm2: i32) -> v4i32 { macro_rules! call { ($imm2:expr) => { msa_slli_w(a, $imm2) @@ -7965,7 +8015,7 @@ unsafe fn __msa_slli_w(a: i32x4, imm2: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_slli_d(a: i64x2, imm1: i32) -> i64x2 { +unsafe fn __msa_slli_d(a: v2i64, imm1: i32) -> v2i64 { macro_rules! call { ($imm1:expr) => { msa_slli_d(a, $imm1) @@ -7977,57 +8027,57 @@ unsafe fn __msa_slli_d(a: i64x2, imm1: i32) -> i64x2 { /// GPR Element Splat /// /// Replicate vector 'a'(sixteen signed 8-bit integer numbers) -/// element with index given by GPR 'b' to all elements in vector +/// element with index given by GPR 'b' to all elements in vector /// (sixteen signed 8-bit integer numbers) GPR 'b' value is interpreted /// modulo the number of data format df elements in the destination vector. 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.b))] -unsafe fn __msa_splat_b(a: i8x16, b: i32) -> i8x16 { - msa_splat_b(a, b) +unsafe fn __msa_splat_b(a: v16i8, b: i32) -> v16i8 { + msa_splat_b(a, ::mem::transmute(b)) } /// GPR Element Splat /// /// Replicate vector 'a'(eight signed 16-bit integer numbers) -/// element with index given by GPR 'b' to all elements in vector +/// element with index given by GPR 'b' to all elements in vector /// (eight signed 16-bit integer numbers) GPR 'b' value is interpreted /// modulo the number of data format df elements in the destination vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.h))] -unsafe fn __msa_splat_h(a: i16x8, b: i32) -> i16x8 { - msa_splat_h(a, b) +unsafe fn __msa_splat_h(a: v8i16, b: i32) -> v8i16 { + msa_splat_h(a, ::mem::transmute(b)) } /// GPR Element Splat /// /// Replicate vector 'a'(four signed 32-bit integer numbers) -/// element with index given by GPR 'b' to all elements in vector +/// element with index given by GPR 'b' to all elements in vector /// (four signed 32-bit integer numbers) GPR 'b' value is interpreted /// modulo the number of data format df elements in the destination vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.w))] -unsafe fn __msa_splat_w(a: i32x4, b: i32) -> i32x4 { - msa_splat_w(a, b) +unsafe fn __msa_splat_w(a: v4i32, b: i32) -> v4i32 { + msa_splat_w(a, ::mem::transmute(b)) } /// GPR Element Splat /// /// Replicate vector 'a'(two signed 64-bit integer numbers) -/// element with index given by GPR 'b' to all elements in vector +/// element with index given by GPR 'b' to all elements in vector /// (two signed 64-bit integer numbers) GPR 'b' value is interpreted /// modulo the number of data format df elements in the destination vector. /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.d))] -unsafe fn __msa_splat_d(a: i64x2, b: i32) -> i64x2 { - msa_splat_d(a, b) +unsafe fn __msa_splat_d(a: v2i64, b: i32) -> v2i64 { + msa_splat_d(a, ::mem::transmute(b)) } /// Immediate Element Splat @@ -8039,7 +8089,7 @@ unsafe fn __msa_splat_d(a: i64x2, b: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_splati_b(a: i8x16, imm4: i32) -> i8x16 { +unsafe fn __msa_splati_b(a: v16i8, imm4: i32) -> v16i8 { macro_rules! call { ($imm4:expr) => { msa_splati_b(a, $imm4) @@ -8057,7 +8107,7 @@ unsafe fn __msa_splati_b(a: i8x16, imm4: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_splati_h(a: i16x8, imm3: i32) -> i16x8 { +unsafe fn __msa_splati_h(a: v8i16, imm3: i32) -> v8i16 { macro_rules! call { ($imm3:expr) => { msa_splati_h(a, $imm3) @@ -8075,7 +8125,7 @@ unsafe fn __msa_splati_h(a: i16x8, imm3: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_splati_w(a: i32x4, imm2: i32) -> i32x4 { +unsafe fn __msa_splati_w(a: v4i32, imm2: i32) -> v4i32 { macro_rules! 
call { ($imm2:expr) => { msa_splati_w(a, $imm2) @@ -8093,7 +8143,7 @@ unsafe fn __msa_splati_w(a: i32x4, imm2: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_splati_d(a: i64x2, imm1: i32) -> i64x2 { +unsafe fn __msa_splati_d(a: v2i64, imm1: i32) -> v2i64 { macro_rules! call { ($imm1:expr) => { msa_splati_d(a, $imm1) @@ -8112,8 +8162,8 @@ unsafe fn __msa_splati_d(a: i64x2, imm1: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.b))] -unsafe fn __msa_sra_b(a: i8x16, b: i8x16) -> i8x16 { - msa_sra_b(a, b) +unsafe fn __msa_sra_b(a: v16i8, b: v16i8) -> v16i8 { + msa_sra_b(a, ::mem::transmute(b)) } /// Vector Shift Right Arithmetic @@ -8126,8 +8176,8 @@ unsafe fn __msa_sra_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.h))] -unsafe fn __msa_sra_h(a: i16x8, b: i16x8) -> i16x8 { - msa_sra_h(a, b) +unsafe fn __msa_sra_h(a: v8i16, b: v8i16) -> v8i16 { + msa_sra_h(a, ::mem::transmute(b)) } /// Vector Shift Right Arithmetic @@ -8140,8 +8190,8 @@ unsafe fn __msa_sra_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.w))] -unsafe fn __msa_sra_w(a: i32x4, b: i32x4) -> i32x4 { - msa_sra_w(a, b) +unsafe fn __msa_sra_w(a: v4i32, b: v4i32) -> v4i32 { + msa_sra_w(a, ::mem::transmute(b)) } /// Vector Shift Right Arithmetic @@ -8154,8 +8204,8 @@ unsafe fn __msa_sra_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.d))] -unsafe fn __msa_sra_d(a: i64x2, b: i64x2) -> i64x2 { - msa_sra_d(a, b) +unsafe fn __msa_sra_d(a: v2i64, b: v2i64) -> v2i64 { + msa_sra_d(a, ::mem::transmute(b)) } /// Immediate Shift Right Arithmetic @@ -8168,7 +8218,7 @@ unsafe fn __msa_sra_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srai_b(a: i8x16, imm3: i32) -> i8x16 { +unsafe fn __msa_srai_b(a: v16i8, imm3: i32) -> v16i8 { macro_rules! call { ($imm3:expr) => { msa_srai_b(a, $imm3) @@ -8187,7 +8237,7 @@ unsafe fn __msa_srai_b(a: i8x16, imm3: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srai_h(a: i16x8, imm4: i32) -> i16x8 { +unsafe fn __msa_srai_h(a: v8i16, imm4: i32) -> v8i16 { macro_rules! call { ($imm4:expr) => { msa_srai_h(a, $imm4) @@ -8206,7 +8256,7 @@ unsafe fn __msa_srai_h(a: i16x8, imm4: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srai_w(a: i32x4, imm5: i32) -> i32x4 { +unsafe fn __msa_srai_w(a: v4i32, imm5: i32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_srai_w(a, $imm5) @@ -8225,7 +8275,7 @@ unsafe fn __msa_srai_w(a: i32x4, imm5: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srai_d(a: i64x2, imm6: i32) -> i64x2 { +unsafe fn __msa_srai_d(a: v2i64, imm6: i32) -> v2i64 { macro_rules! 
call { ($imm6:expr) => { msa_srai_d(a, $imm6) @@ -8245,8 +8295,8 @@ unsafe fn __msa_srai_d(a: i64x2, imm6: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.b))] -unsafe fn __msa_srar_b(a: i8x16, b: i8x16) -> i8x16 { - msa_srar_b(a, b) +unsafe fn __msa_srar_b(a: v16i8, b: v16i8) -> v16i8 { + msa_srar_b(a, ::mem::transmute(b)) } /// Vector Shift Right Arithmetic Rounded @@ -8260,8 +8310,8 @@ unsafe fn __msa_srar_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.h))] -unsafe fn __msa_srar_h(a: i16x8, b: i16x8) -> i16x8 { - msa_srar_h(a, b) +unsafe fn __msa_srar_h(a: v8i16, b: v8i16) -> v8i16 { + msa_srar_h(a, ::mem::transmute(b)) } /// Vector Shift Right Arithmetic Rounded @@ -8275,8 +8325,8 @@ unsafe fn __msa_srar_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.w))] -unsafe fn __msa_srar_w(a: i32x4, b: i32x4) -> i32x4 { - msa_srar_w(a, b) +unsafe fn __msa_srar_w(a: v4i32, b: v4i32) -> v4i32 { + msa_srar_w(a, ::mem::transmute(b)) } /// Vector Shift Right Arithmetic Rounded @@ -8290,22 +8340,22 @@ unsafe fn __msa_srar_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.d))] -unsafe fn __msa_srar_d(a: i64x2, b: i64x2) -> i64x2 { - msa_srar_d(a, b) +unsafe fn __msa_srar_d(a: v2i64, b: v2i64) -> v2i64 { + msa_srar_d(a, ::mem::transmute(b)) } /// Immediate Shift Right Arithmetic Rounded /// /// The elements in vector 'a'(sixteen signed 8-bit integer numbers) /// are shifted right arithmetic by imm3 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(sixteen signed 8-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srari_b(a: i8x16, imm3: i32) -> i8x16 { +unsafe fn __msa_srari_b(a: v16i8, imm3: i32) -> v16i8 { macro_rules! call { ($imm3:expr) => { msa_srari_b(a, $imm3) @@ -8318,14 +8368,14 @@ unsafe fn __msa_srari_b(a: i8x16, imm3: i32) -> i8x16 { /// Immediate Shift Right Arithmetic Rounded /// /// The elements in vector 'a'(eight signed 16-bit integer numbers) /// are shifted right arithmetic by imm4 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(eight signed 16-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srari_h(a: i16x8, imm4: i32) -> i16x8 { +unsafe fn __msa_srari_h(a: v8i16, imm4: i32) -> v8i16 { macro_rules! call { ($imm4:expr) => { msa_srari_h(a, $imm4) @@ -8338,14 +8388,14 @@ unsafe fn __msa_srari_h(a: i16x8, imm4: i32) -> i16x8 { /// Immediate Shift Right Arithmetic Rounded /// /// The elements in vector 'a'(four signed 32-bit integer numbers) /// are shifted right arithmetic by imm5 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(four signed 32-bit integer numbers). 
/// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srari_w(a: i32x4, imm5: i32) -> i32x4 { +unsafe fn __msa_srari_w(a: v4i32, imm5: i32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_srari_w(a, $imm5) @@ -8358,14 +8408,14 @@ unsafe fn __msa_srari_w(a: i32x4, imm5: i32) -> i32x4 { /// Immediate Shift Right Arithmetic Rounded /// /// The elements in vector 'a'(two signed 64-bit integer numbers) /// are shifted right arithmetic by imm6 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(two signed 64-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srari_d(a: i64x2, imm6: i32) -> i64x2 { +unsafe fn __msa_srari_d(a: v2i64, imm6: i32) -> v2i64 { macro_rules! call { ($imm6:expr) => { msa_srari_d(a, $imm6) @@ -8384,8 +8434,8 @@ unsafe fn __msa_srari_d(a: i64x2, imm6: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.b))] -unsafe fn __msa_srl_b(a: i8x16, b: i8x16) -> i8x16 { - msa_srl_b(a, b) +unsafe fn __msa_srl_b(a: v16i8, b: v16i8) -> v16i8 { + msa_srl_b(a, ::mem::transmute(b)) } /// Vector Shift Right Logical @@ -8398,8 +8448,8 @@ unsafe fn __msa_srl_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.h))] -unsafe fn __msa_srl_h(a: i16x8, b: i16x8) -> i16x8 { - msa_srl_h(a, b) +unsafe fn __msa_srl_h(a: v8i16, b: v8i16) -> v8i16 { + msa_srl_h(a, ::mem::transmute(b)) } /// Vector Shift Right Logical @@ -8412,8 +8462,8 @@ unsafe fn __msa_srl_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.w))] -unsafe fn __msa_srl_w(a: i32x4, b: i32x4) -> i32x4 { - msa_srl_w(a, b) +unsafe fn __msa_srl_w(a: v4i32, b: v4i32) -> v4i32 { + msa_srl_w(a, ::mem::transmute(b)) } /// Vector Shift Right Logical @@ -8426,8 +8476,8 @@ unsafe fn __msa_srl_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.d))] -unsafe fn __msa_srl_d(a: i64x2, b: i64x2) -> i64x2 { - msa_srl_d(a, b) +unsafe fn __msa_srl_d(a: v2i64, b: v2i64) -> v2i64 { + msa_srl_d(a, ::mem::transmute(b)) } /// Immediate Shift Right Logical @@ -8440,7 +8490,7 @@ unsafe fn __msa_srl_d(a: i64x2, b: i64x2) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.b, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srli_b(a: i8x16, imm4: i32) -> i8x16 { +unsafe fn __msa_srli_b(a: v16i8, imm4: i32) -> v16i8 { macro_rules! call { ($imm4:expr) => { msa_srli_b(a, $imm4) @@ -8459,7 +8509,7 @@ unsafe fn __msa_srli_b(a: i8x16, imm4: i32) -> i8x16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.h, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srli_h(a: i16x8, imm3: i32) -> i16x8 { +unsafe fn __msa_srli_h(a: v8i16, imm3: i32) -> v8i16 { macro_rules! call { ($imm3:expr) => { msa_srli_h(a, $imm3) @@ -8478,7 +8528,7 @@ unsafe fn __msa_srli_h(a: i16x8, imm3: i32) -> i16x8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.w, imm2 = 0b11))] #[rustc_args_required_const(1)] -unsafe fn __msa_srli_w(a: i32x4, imm2: i32) -> i32x4 { +unsafe fn __msa_srli_w(a: v4i32, imm2: i32) -> v4i32 { macro_rules! 
call { ($imm2:expr) => { msa_srli_w(a, $imm2) @@ -8497,7 +8547,7 @@ unsafe fn __msa_srli_w(a: i32x4, imm2: i32) -> i32x4 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.d, imm1 = 0b1))] #[rustc_args_required_const(1)] -unsafe fn __msa_srli_d(a: i64x2, imm1: i32) -> i64x2 { +unsafe fn __msa_srli_d(a: v2i64, imm1: i32) -> v2i64 { macro_rules! call { ($imm1:expr) => { msa_srli_d(a, $imm1) @@ -8517,8 +8567,8 @@ unsafe fn __msa_srli_d(a: i64x2, imm1: i32) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.b))] -unsafe fn __msa_srlr_b(a: i8x16, b: i8x16) -> i8x16 { - msa_srlr_b(a, b) +unsafe fn __msa_srlr_b(a: v16i8, b: v16i8) -> v16i8 { + msa_srlr_b(a, ::mem::transmute(b)) } /// Vector Shift Right Logical Rounded @@ -8532,8 +8582,8 @@ unsafe fn __msa_srlr_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.h))] -unsafe fn __msa_srlr_h(a: i16x8, b: i16x8) -> i16x8 { - msa_srlr_h(a, b) +unsafe fn __msa_srlr_h(a: v8i16, b: v8i16) -> v8i16 { + msa_srlr_h(a, ::mem::transmute(b)) } /// Vector Shift Right Logical Rounded @@ -8547,8 +8597,8 @@ unsafe fn __msa_srlr_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.w))] -unsafe fn __msa_srlr_w(a: i32x4, b: i32x4) -> i32x4 { - msa_srlr_w(a, b) +unsafe fn __msa_srlr_w(a: v4i32, b: v4i32) -> v4i32 { + msa_srlr_w(a, ::mem::transmute(b)) } /// Vector Shift Right Logical Rounded @@ -8562,22 +8612,22 @@ unsafe fn __msa_srlr_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.d))] -unsafe fn __msa_srlr_d(a: i64x2, b: i64x2) -> i64x2 { - msa_srlr_d(a, b) +unsafe fn __msa_srlr_d(a: v2i64, b: v2i64) -> v2i64 { + msa_srlr_d(a, ::mem::transmute(b)) } /// Immediate Shift Right Logical Rounded /// /// The elements in vector 'a'(sixteen signed 8-bit integer numbers) /// are shifted right logical by imm3 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(sixteen signed 8-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.b, imm3 = 0b111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srlri_b(a: i8x16, imm3: i32) -> i8x16 { +unsafe fn __msa_srlri_b(a: v16i8, imm3: i32) -> v16i8 { macro_rules! call { ($imm3:expr) => { msa_srlri_b(a, $imm3) @@ -8590,14 +8640,14 @@ unsafe fn __msa_srlri_b(a: i8x16, imm3: i32) -> i8x16 { /// Immediate Shift Right Logical Rounded /// /// The elements in vector 'a'(eight signed 16-bit integer numbers) /// are shifted right logical by imm4 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(eight signed 16-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.h, imm4 = 0b1111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srlri_h(a: i16x8, imm4: i32) -> i16x8 { +unsafe fn __msa_srlri_h(a: v8i16, imm4: i32) -> v8i16 { macro_rules! 
call { ($imm4:expr) => { msa_srlri_h(a, $imm4) @@ -8610,14 +8660,14 @@ unsafe fn __msa_srlri_h(a: i16x8, imm4: i32) -> i16x8 { /// Immediate Shift Right Logical Rounded /// /// The elements in vector 'a'(four signed 32-bit integer numbers) /// are shifted right logical by imm5 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(four signed 32-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.w, imm5 = 0b11111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srlri_w(a: i32x4, imm5: i32) -> i32x4 { +unsafe fn __msa_srlri_w(a: v4i32, imm5: i32) -> v4i32 { macro_rules! call { ($imm5:expr) => { msa_srlri_w(a, $imm5) @@ -8630,14 +8680,14 @@ unsafe fn __msa_srlri_w(a: i32x4, imm5: i32) -> i32x4 { /// Immediate Shift Right Logical Rounded /// /// The elements in vector 'a'(two signed 64-bit integer numbers) /// are shifted right logical by imm6 bits. The most significant -/// discarded bit is added to the shifted value (for rounding) and +/// discarded bit is added to the shifted value (for rounding) and /// the result is written to vector(two signed 64-bit integer numbers). /// #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.d, imm6 = 0b111111))] #[rustc_args_required_const(1)] -unsafe fn __msa_srlri_d(a: i64x2, imm6: i32) -> i64x2 { +unsafe fn __msa_srlri_d(a: v2i64, imm6: i32) -> v2i64 { macro_rules! call { ($imm6:expr) => { msa_srlri_d(a, $imm6) @@ -8656,7 +8706,7 @@ unsafe fn __msa_srlri_d(a: i64x2, imm6: i32) -> i64x2 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_st_b(a: i8x16, mem_addr: *mut i8, imm_s10: i32) -> () { +unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut i8, imm_s10: i32) -> () { macro_rules! call { ($imm_s10:expr) => { msa_st_b(a, mem_addr, $imm_s10) @@ -8675,7 +8725,7 @@ unsafe fn __msa_st_b(a: i8x16, mem_addr: *mut i8, imm_s10: i32) -> () { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_st_h(a: i16x8, mem_addr: *mut i8, imm_s11: i32) -> () { +unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut i8, imm_s11: i32) -> () { macro_rules! call { ($imm_s11:expr) => { msa_st_h(a, mem_addr, $imm_s11) @@ -8694,7 +8744,7 @@ unsafe fn __msa_st_h(a: i16x8, mem_addr: *mut i8, imm_s11: i32) -> () { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.w, imm_s12 = 0b111111111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_st_w(a: i32x4, mem_addr: *mut i8, imm_s12: i32) -> () { +unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut i8, imm_s12: i32) -> () { macro_rules! call { ($imm_s12:expr) => { msa_st_w(a, mem_addr, $imm_s12) @@ -8713,7 +8763,7 @@ unsafe fn __msa_st_w(a: i32x4, mem_addr: *mut i8, imm_s12: i32) -> () { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))] #[rustc_args_required_const(2)] -unsafe fn __msa_st_d(a: i64x2, mem_addr: *mut i8, imm_s13: i32) -> () { +unsafe fn __msa_st_d(a: v2i64, mem_addr: *mut i8, imm_s13: i32) -> () { macro_rules! 
call { ($imm_s13:expr) => { msa_st_d(a, mem_addr, $imm_s13) @@ -8732,8 +8782,8 @@ unsafe fn __msa_st_d(a: i64x2, mem_addr: *mut i8, imm_s13: i32) -> () { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.b))] -unsafe fn __msa_subs_s_b(a: i8x16, b: i8x16) -> i8x16 { - msa_subs_s_b(a, b) +unsafe fn __msa_subs_s_b(a: v16i8, b: v16i8) -> v16i8 { + msa_subs_s_b(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Signed Values @@ -8746,8 +8796,8 @@ unsafe fn __msa_subs_s_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.h))] -unsafe fn __msa_subs_s_h(a: i16x8, b: i16x8) -> i16x8 { - msa_subs_s_h(a, b) +unsafe fn __msa_subs_s_h(a: v8i16, b: v8i16) -> v8i16 { + msa_subs_s_h(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Signed Values @@ -8760,8 +8810,8 @@ unsafe fn __msa_subs_s_h(a: i16x8, b: i16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.w))] -unsafe fn __msa_subs_s_w(a: i32x4, b: i32x4) -> i32x4 { - msa_subs_s_w(a, b) +unsafe fn __msa_subs_s_w(a: v4i32, b: v4i32) -> v4i32 { + msa_subs_s_w(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Signed Values @@ -8774,8 +8824,8 @@ unsafe fn __msa_subs_s_w(a: i32x4, b: i32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.d))] -unsafe fn __msa_subs_s_d(a: i64x2, b: i64x2) -> i64x2 { - msa_subs_s_d(a, b) +unsafe fn __msa_subs_s_d(a: v2i64, b: v2i64) -> v2i64 { + msa_subs_s_d(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Unsigned Values @@ -8788,8 +8838,8 @@ unsafe fn __msa_subs_s_d(a: i64x2, b: i64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.b))] -unsafe fn __msa_subs_u_b(a: u8x16, b: u8x16) -> u8x16 { - msa_subs_u_b(a, b) +unsafe fn __msa_subs_u_b(a: v16u8, b: v16u8) -> v16u8 { + msa_subs_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Unsigned Values @@ -8802,8 +8852,8 @@ unsafe fn __msa_subs_u_b(a: u8x16, b: u8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.h))] -unsafe fn __msa_subs_u_h(a: u16x8, b: u16x8) -> u16x8 { - msa_subs_u_h(a, b) +unsafe fn __msa_subs_u_h(a: v8u16, b: v8u16) -> v8u16 { + msa_subs_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Unsigned Values @@ -8816,8 +8866,8 @@ unsafe fn __msa_subs_u_h(a: u16x8, b: u16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.w))] -unsafe fn __msa_subs_u_w(a: u32x4, b: u32x4) -> u32x4 { - msa_subs_u_w(a, b) +unsafe fn __msa_subs_u_w(a: v4u32, b: v4u32) -> v4u32 { + msa_subs_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Unsigned Values @@ -8830,8 +8880,8 @@ unsafe fn __msa_subs_u_w(a: u32x4, b: u32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.d))] -unsafe fn __msa_subs_u_d(a: u64x2, b: u64x2) -> u64x2 { - msa_subs_u_d(a, b) +unsafe fn __msa_subs_u_d(a: v2u64, b: v2u64) -> v2u64 { + msa_subs_u_d(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Signed from Unsigned @@ -8844,8 +8894,8 @@ unsafe fn __msa_subs_u_d(a: u64x2, b: u64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.b))] -unsafe fn __msa_subsus_u_b(a: u8x16, b: i8x16) -> u8x16 { - msa_subsus_u_b(a, b) +unsafe fn __msa_subsus_u_b(a: v16u8, b: v16i8) -> 
v16u8 { + msa_subsus_u_b(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Signed from Unsigned @@ -8858,8 +8908,8 @@ unsafe fn __msa_subsus_u_b(a: u8x16, b: i8x16) -> u8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.h))] -unsafe fn __msa_subsus_u_h(a: u16x8, b: i16x8) -> u16x8 { - msa_subsus_u_h(a, b) +unsafe fn __msa_subsus_u_h(a: v8u16, b: v8i16) -> v8u16 { + msa_subsus_u_h(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Signed from Unsigned @@ -8872,8 +8922,8 @@ unsafe fn __msa_subsus_u_h(a: u16x8, b: i16x8) -> u16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.w))] -unsafe fn __msa_subsus_u_w(a: u32x4, b: i32x4) -> u32x4 { - msa_subsus_u_w(a, b) +unsafe fn __msa_subsus_u_w(a: v4u32, b: v4i32) -> v4u32 { + msa_subsus_u_w(a, ::mem::transmute(b)) } /// Vector Unsigned Saturated Subtract of Signed from Unsigned @@ -8886,8 +8936,8 @@ unsafe fn __msa_subsus_u_w(a: u32x4, b: i32x4) -> u32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.d))] -unsafe fn __msa_subsus_u_d(a: u64x2, b: i64x2) -> u64x2 { - msa_subsus_u_d(a, b) +unsafe fn __msa_subsus_u_d(a: v2u64, b: v2i64) -> v2u64 { + msa_subsus_u_d(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Unsigned Values @@ -8900,8 +8950,8 @@ unsafe fn __msa_subsus_u_d(a: u64x2, b: i64x2) -> u64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.b))] -unsafe fn __msa_subsuu_s_b(a: u8x16, b: u8x16) -> i8x16 { - msa_subsuu_s_b(a, b) +unsafe fn __msa_subsuu_s_b(a: v16u8, b: v16u8) -> v16i8 { + msa_subsuu_s_b(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Unsigned Values @@ -8914,8 +8964,8 @@ unsafe fn __msa_subsuu_s_b(a: u8x16, b: u8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.h))] -unsafe fn __msa_subsuu_s_h(a: u16x8, b: u16x8) -> i16x8 { - msa_subsuu_s_h(a, b) +unsafe fn __msa_subsuu_s_h(a: v8u16, b: v8u16) -> v8i16 { + msa_subsuu_s_h(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Unsigned Values @@ -8928,8 +8978,8 @@ unsafe fn __msa_subsuu_s_h(a: u16x8, b: u16x8) -> i16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.w))] -unsafe fn __msa_subsuu_s_w(a: u32x4, b: u32x4) -> i32x4 { - msa_subsuu_s_w(a, b) +unsafe fn __msa_subsuu_s_w(a: v4u32, b: v4u32) -> v4i32 { + msa_subsuu_s_w(a, ::mem::transmute(b)) } /// Vector Signed Saturated Subtract of Unsigned Values @@ -8942,8 +8992,8 @@ unsafe fn __msa_subsuu_s_w(a: u32x4, b: u32x4) -> i32x4 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.d))] -unsafe fn __msa_subsuu_s_d(a: u64x2, b: u64x2) -> i64x2 { - msa_subsuu_s_d(a, b) +unsafe fn __msa_subsuu_s_d(a: v2u64, b: v2u64) -> v2i64 { + msa_subsuu_s_d(a, ::mem::transmute(b)) } /// Vector Subtract @@ -8955,8 +9005,8 @@ unsafe fn __msa_subsuu_s_d(a: u64x2, b: u64x2) -> i64x2 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subv.b))] -unsafe fn __msa_subv_b(a: i8x16, b: i8x16) -> i8x16 { - msa_subv_b(a, b) +unsafe fn __msa_subv_b(a: v16i8, b: v16i8) -> v16i8 { + msa_subv_b(a, ::mem::transmute(b)) } /// Vector Subtract @@ -8968,8 +9018,8 @@ unsafe fn __msa_subv_b(a: i8x16, b: i8x16) -> i8x16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subv.h))] -unsafe fn __msa_subv_h(a: i16x8, b: i16x8) -> i16x8 { - msa_subv_h(a, b) +unsafe fn 
@@ -8955,8 +9005,8 @@ unsafe fn __msa_subsuu_s_d(a: u64x2, b: u64x2) -> i64x2 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subv.b))]
-unsafe fn __msa_subv_b(a: i8x16, b: i8x16) -> i8x16 {
-    msa_subv_b(a, b)
+unsafe fn __msa_subv_b(a: v16i8, b: v16i8) -> v16i8 {
+    msa_subv_b(a, ::mem::transmute(b))
 }

 /// Vector Subtract
@@ -8968,8 +9018,8 @@ unsafe fn __msa_subv_b(a: i8x16, b: i8x16) -> i8x16 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subv.h))]
-unsafe fn __msa_subv_h(a: i16x8, b: i16x8) -> i16x8 {
-    msa_subv_h(a, b)
+unsafe fn __msa_subv_h(a: v8i16, b: v8i16) -> v8i16 {
+    msa_subv_h(a, ::mem::transmute(b))
 }

 /// Vector Subtract
@@ -8981,8 +9031,8 @@ unsafe fn __msa_subv_h(a: i16x8, b: i16x8) -> i16x8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subv.w))]
-unsafe fn __msa_subv_w(a: i32x4, b: i32x4) -> i32x4 {
-    msa_subv_w(a, b)
+unsafe fn __msa_subv_w(a: v4i32, b: v4i32) -> v4i32 {
+    msa_subv_w(a, ::mem::transmute(b))
 }

 /// Vector Subtract
@@ -8994,8 +9044,8 @@ unsafe fn __msa_subv_w(a: i32x4, b: i32x4) -> i32x4 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subv.d))]
-unsafe fn __msa_subv_d(a: i64x2, b: i64x2) -> i64x2 {
-    msa_subv_d(a, b)
+unsafe fn __msa_subv_d(a: v2i64, b: v2i64) -> v2i64 {
+    msa_subv_d(a, ::mem::transmute(b))
 }

 /// Immediate Subtract
@@ -9008,7 +9058,7 @@ unsafe fn __msa_subv_d(a: i64x2, b: i64x2) -> i64x2 {
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subvi.b, imm5 = 0b10111))]
 #[rustc_args_required_const(1)]
-unsafe fn __msa_subvi_b(a: i8x16, imm5: i32) -> i8x16 {
+unsafe fn __msa_subvi_b(a: v16i8, imm5: i32) -> v16i8 {
     macro_rules! call {
         ($imm5:expr) => {
             msa_subvi_b(a, $imm5)
@@ -9027,7 +9077,7 @@ unsafe fn __msa_subvi_b(a: i8x16, imm5: i32) -> i8x16 {
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subvi.h, imm5 = 0b10111))]
 #[rustc_args_required_const(1)]
-unsafe fn __msa_subvi_h(a: i16x8, imm5: i32) -> i16x8 {
+unsafe fn __msa_subvi_h(a: v8i16, imm5: i32) -> v8i16 {
     macro_rules! call {
         ($imm5:expr) => {
             msa_subvi_h(a, $imm5)
@@ -9046,7 +9096,7 @@ unsafe fn __msa_subvi_h(a: i16x8, imm5: i32) -> i16x8 {
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subvi.w, imm5 = 0b10111))]
 #[rustc_args_required_const(1)]
-unsafe fn __msa_subvi_w(a: i32x4, imm5: i32) -> i32x4 {
+unsafe fn __msa_subvi_w(a: v4i32, imm5: i32) -> v4i32 {
     macro_rules! call {
         ($imm5:expr) => {
             msa_subvi_w(a, $imm5)
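The subvi_* wrappers (like the other immediate-form intrinsics in this patch) cannot pass imm5 straight through, because the LLVM intrinsic needs a constant argument; the local call! macro plus a constify_imm5!-style helper from the new macros.rs expand the runtime i32 into one literal per possible value. A cut-down, self-contained model of that expansion (hypothetical names, not the actual macro from macros.rs):

    // Simplified stand-in for constify_imm5!: match the runtime value into
    // arms so that each arm hands the callee a literal constant.
    macro_rules! constify_imm5_demo {
        ($imm5:expr, $expand:ident) => {
            match $imm5 & 0b1_1111 {
                0 => $expand!(0),
                1 => $expand!(1),
                // ...the real helper has one arm for every value 0..=31...
                _ => $expand!(31),
            }
        };
    }

    fn demo(imm5: i32) -> i32 {
        macro_rules! call {
            ($imm5:expr) => {
                $imm5 * 2 // stand-in for msa_subvi_*(a, $imm5)
            };
        }
        constify_imm5_demo!(imm5, call)
    }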
@@ -9065,7 +9115,7 @@ unsafe fn __msa_subvi_w(a: i32x4, imm5: i32) -> i32x4 {
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(subvi.d, imm5 = 0b10111))]
 #[rustc_args_required_const(1)]
-unsafe fn __msa_subvi_d(a: i64x2, imm5: i32) -> i64x2 {
+unsafe fn __msa_subvi_d(a: v2i64, imm5: i32) -> v2i64 {
     macro_rules! call {
         ($imm5:expr) => {
             msa_subvi_d(a, $imm5)
@@ -9087,8 +9137,8 @@ unsafe fn __msa_subvi_d(a: i64x2, imm5: i32) -> i64x2 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(vshf.b))]
-unsafe fn __msa_vshf_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 {
-    msa_vshf_b(a, b, c)
+unsafe fn __msa_vshf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
+    msa_vshf_b(a, ::mem::transmute(b), c)
 }

 /// Vector Data Preserving Shuffle
@@ -9104,8 +9154,8 @@ unsafe fn __msa_vshf_b(a: i8x16, b: i8x16, c: i8x16) -> i8x16 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(vshf.h))]
-unsafe fn __msa_vshf_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 {
-    msa_vshf_h(a, b, c)
+unsafe fn __msa_vshf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
+    msa_vshf_h(a, ::mem::transmute(b), c)
 }

 /// Vector Data Preserving Shuffle
@@ -9121,8 +9171,8 @@ unsafe fn __msa_vshf_h(a: i16x8, b: i16x8, c: i16x8) -> i16x8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(vshf.w))]
-unsafe fn __msa_vshf_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 {
-    msa_vshf_w(a, b, c)
+unsafe fn __msa_vshf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
+    msa_vshf_w(a, ::mem::transmute(b), c)
 }

 /// Vector Data Preserving Shuffle
@@ -9138,8 +9188,8 @@ unsafe fn __msa_vshf_w(a: i32x4, b: i32x4, c: i32x4) -> i32x4 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(vshf.d))]
-unsafe fn __msa_vshf_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 {
-    msa_vshf_d(a, b, c)
+unsafe fn __msa_vshf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
+    msa_vshf_d(a, ::mem::transmute(b), c)
 }

 /// Vector Logical Exclusive Or
@@ -9152,8 +9202,8 @@ unsafe fn __msa_vshf_d(a: i64x2, b: i64x2, c: i64x2) -> i64x2 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(xor.v))]
-unsafe fn __msa_xor_v(a: u8x16, b: u8x16) -> u8x16 {
-    msa_xor_v(a, b)
+unsafe fn __msa_xor_v(a: v16u8, b: v16u8) -> v16u8 {
+    msa_xor_v(a, ::mem::transmute(b))
 }

 /// Immediate Logical Exclusive Or
@@ -9167,7 +9217,7 @@ unsafe fn __msa_xor_v(a: u8x16, b: u8x16) -> u8x16 {
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(xori.b, imm8 = 0b11111111))]
 #[rustc_args_required_const(1)]
-unsafe fn __msa_xori_b(a: u8x16, imm8: i32) -> u8x16 {
+unsafe fn __msa_xori_b(a: v16u8, imm8: i32) -> v16u8 {
     macro_rules! call {
         ($imm8:expr) => {
             msa_xori_b(a, $imm8)
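One pattern repeats through the whole test diff below: vectors are still constructed with the portable core_arch::simd types (i8x16::new and friends), and ::mem::transmute moves the bits into and out of the public MSA vector types (v16i8 and friends) at the intrinsic boundary. The transmute is sound because both families are 128-bit vector types of identical size and layout. A standalone sketch of the idea, with hypothetical stand-in structs rather than the real types:

    use std::mem;

    #[derive(Clone, Copy, PartialEq, Debug)]
    #[repr(C)]
    struct PortableI8x16([i8; 16]); // stands in for core_arch::simd::i8x16

    #[derive(Clone, Copy)]
    #[repr(C)]
    struct V16i8([i8; 16]); // stands in for the public MSA type v16i8

    fn main() {
        let a = PortableI8x16([5; 16]);
        // Same size and layout, so the conversion is a bit-for-bit move,
        // just like the ::mem::transmute calls in the tests below.
        let as_msa: V16i8 = unsafe { mem::transmute(a) };
        let back: PortableI8x16 = unsafe { mem::transmute(as_msa) };
        assert_eq!(a, back);
    }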
@@ -9178,1419 +9228,1767 @@ unsafe fn __msa_xori_b(a: u8x16, imm8: i32) -> u8x16 {

 #[cfg(test)]
 mod tests {
+    use crate::core_arch::mips::msa::*;
+    use core_arch::simd::*;
     use std::f32;
     use std::f64;
-    use core_arch::mips::msa::*;
+    use std::mem;
     use stdsimd_test::simd_test;

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_add_a_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             1, 2, 3, 4,
             1, 2, 3, 4,
             1, 2, 3, 4,
             1, 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = i8x16(
+        let b = i8x16::new(
             -4, -3, -2, -1,
             -4, -3, -2, -1,
             -4, -3, -2, -1,
             -4, -3, -2, -1
         );
-        let r = i8x16(
-            5, 5, 5, 5,
+        #[rustfmt::skip]
+        let r = i8x16::new(
+            5, 5, 5, 5,
             5, 5, 5, 5,
             5, 5, 5, 5,
             5, 5, 5, 5
         );

-        assert_eq!(r, __msa_add_a_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_add_a_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_add_a_h() {
         #[rustfmt::skip]
-        let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4);
+        let a = i16x8::new(1, 2, 3, 4, 1, 2, 3, 4);
         #[rustfmt::skip]
-        let b = i16x8(-4, -3, -2, -1, -4, -3, -2, -1);
-        let r = i16x8(5, 5, 5, 5, 5, 5, 5, 5);
+        let b = i16x8::new(-4, -3, -2, -1, -4, -3, -2, -1);
+        #[rustfmt::skip]
+        let r = i16x8::new(5, 5, 5, 5, 5, 5, 5, 5);

-        assert_eq!(r, __msa_add_a_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_add_a_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_add_a_w() {
         #[rustfmt::skip]
-        let a = i32x4(1, 2, 3, 4);
+        let a = i32x4::new(1, 2, 3, 4);
         #[rustfmt::skip]
-        let b = i32x4(-4, -3, -2, -1);
-        let r = i32x4(5, 5, 5, 5);
+        let b = i32x4::new(-4, -3, -2, -1);
+        #[rustfmt::skip]
+        let r = i32x4::new(5, 5, 5, 5);

-        assert_eq!(r, __msa_add_a_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_add_a_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_add_a_d() {
         #[rustfmt::skip]
-        let a = i64x2(1, 2);
+        let a = i64x2::new(1, 2);
         #[rustfmt::skip]
-        let b = i64x2(-4, -3);
-        let r = i64x2(5, 5);
+        let b = i64x2::new(-4, -3);
+        #[rustfmt::skip]
+        let r = i64x2::new(5, 5);

-        assert_eq!(r, __msa_add_a_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_add_a_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }
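As the expected values above show, add_a.* adds the absolute values of corresponding lanes (|1| + |-4| = 5); it is understood here as the modulo (non-saturating) form, with adds_a.* below being the saturating one. A one-lane scalar model (illustrative only; the helper name is hypothetical):

    // One lane of add_a.b: |a| + |b|, wrapping like the non-saturating
    // hardware op; adds_a.b (tested next) clamps at i8::MAX instead.
    fn add_a_b_lane(a: i8, b: i8) -> i8 {
        a.wrapping_abs().wrapping_add(b.wrapping_abs())
    }

    fn main() {
        assert_eq!(add_a_b_lane(1, -4), 5); // matches test_msa_add_a_b
    }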

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_a_b() {
         #[rustfmt::skip]
-        let a = i8x16(
-            100, i8::max_value(), 100, i8::max_value(),
+        let a = i8x16::new(
+            100, i8::max_value(), 100, i8::max_value(),
             100, i8::max_value(), 100, i8::max_value(),
             100, i8::max_value(), 100, i8::max_value(),
             100, i8::max_value(), 100, i8::max_value()
-        );
+        );
         #[rustfmt::skip]
-        let b = i8x16(
-            -4, -3, -2, -100,
-            -4, -3, -2, -100,
-            -4, -3, -2, -100,
+        let b = i8x16::new(
+            -4, -3, -2, -100,
+            -4, -3, -2, -100,
+            -4, -3, -2, -100,
             -4, -3, -2, -100
-        );
-        let r = i8x16(
+        );
+        #[rustfmt::skip]
+        let r = i8x16::new(
             104, 127, 102, 127,
             104, 127, 102, 127,
             104, 127, 102, 127,
             104, 127, 102, 127
         );

-        assert_eq!(r, __msa_adds_a_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_a_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_a_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             100, i16::max_value(), 100, i16::max_value(),
             100, i16::max_value(), 100, i16::max_value()
         );
         #[rustfmt::skip]
-        let b = i16x8(-4, -3, -2, -1, -4, -3, -2, -1);
-        let r = i16x8(
+        let b = i16x8::new(-4, -3, -2, -1, -4, -3, -2, -1);
+        #[rustfmt::skip]
+        let r = i16x8::new(
             104, i16::max_value(), 102, i16::max_value(),
             104, i16::max_value(), 102, i16::max_value()
         );

-        assert_eq!(r, __msa_adds_a_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_a_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_a_w() {
         #[rustfmt::skip]
-        let a = i32x4(100, i32::max_value(), 100, i32::max_value());
+        let a = i32x4::new(100, i32::max_value(), 100, i32::max_value());
         #[rustfmt::skip]
-        let b = i32x4(-4, -3, -2, -1);
-        let r = i32x4(104, i32::max_value(), 102, i32::max_value());
+        let b = i32x4::new(-4, -3, -2, -1);
+        #[rustfmt::skip]
+        let r = i32x4::new(104, i32::max_value(), 102, i32::max_value());

-        assert_eq!(r, __msa_adds_a_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_a_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_a_d() {
         #[rustfmt::skip]
-        let a = i64x2(100, i64::max_value());
+        let a = i64x2::new(100, i64::max_value());
         #[rustfmt::skip]
-        let b = i64x2(-4, -3);
-        let r = i64x2(104, i64::max_value());
+        let b = i64x2::new(-4, -3);
+        #[rustfmt::skip]
+        let r = i64x2::new(104, i64::max_value());

-        assert_eq!(r, __msa_adds_a_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_a_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_s_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             100, i8::min_value(), 100, i8::max_value(),
             100, i8::min_value(), 100, i8::max_value(),
             100, i8::min_value(), 100, i8::max_value(),
             100, i8::min_value(), 100, i8::max_value()
         );
         #[rustfmt::skip]
-        let b = i8x16(
+        let b = i8x16::new(
             -4, -3, -2, 100,
             -4, -3, -2, 100,
             -4, -3, -2, 100,
             -4, -3, -2, 100
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             96, i8::min_value(), 98, i8::max_value(),
             96, i8::min_value(), 98, i8::max_value(),
             96, i8::min_value(), 98, i8::max_value(),
             96, i8::min_value(), 98, i8::max_value()
         );

-        assert_eq!(r, __msa_adds_s_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_s_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_s_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             100, i16::min_value(), 100, i16::max_value(),
             100, i16::min_value(), 100, i16::max_value()
         );
         #[rustfmt::skip]
-        let b = i16x8(-4, -3, -2, 1, -4, -3, -2, 1);
-        let r = i16x8(
+        let b = i16x8::new(-4, -3, -2, 1, -4, -3, -2, 1);
+        #[rustfmt::skip]
+        let r = i16x8::new(
             96, i16::min_value(), 98, i16::max_value(),
             96, i16::min_value(), 98, i16::max_value()
         );

-        assert_eq!(r, __msa_adds_s_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_s_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_s_w() {
         #[rustfmt::skip]
-        let a = i32x4(100, i32::max_value(), 100, i32::min_value());
+        let a = i32x4::new(100, i32::max_value(), 100, i32::min_value());
+        #[rustfmt::skip]
+        let b = i32x4::new(-4, 3, -2, -1);
         #[rustfmt::skip]
-        let b = i32x4(-4, 3, -2, -1);
-        let r = i32x4(96, i32::max_value(), 98, i32::min_value());
+        let r = i32x4::new(96, i32::max_value(), 98, i32::min_value());

-        assert_eq!(r, __msa_adds_s_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_adds_s_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_adds_s_d() {
         #[rustfmt::skip]
-        let a = i64x2(100, i64::min_value());
+        let a
= i64x2::new(100, i64::min_value()); + #[rustfmt::skip] + let b = i64x2::new(-4, -3); #[rustfmt::skip] - let b = i64x2(-4, -3); - let r = i64x2(96, i64::min_value()); + let r = i64x2::new(96, i64::min_value()); - assert_eq!(r, __msa_adds_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_adds_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_adds_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value() ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 4, 3, 2, 100, 4, 3, 2, 100, 4, 3, 2, 100, 4, 3, 2, 100 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 104, u8::max_value(), 102, u8::max_value(), 104, u8::max_value(), 102, u8::max_value(), 104, u8::max_value(), 102, u8::max_value(), 104, u8::max_value(), 102, u8::max_value() ); - assert_eq!(r, __msa_adds_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_adds_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_adds_u_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 100, u16::max_value(), 100, u16::max_value(), 100, u16::max_value(), 100, u16::max_value() ); #[rustfmt::skip] - let b = u16x8(4, 3, 2, 1, 4, 3, 2, 1); - let r = u16x8( + let b = u16x8::new(4, 3, 2, 1, 4, 3, 2, 1); + #[rustfmt::skip] + let r = u16x8::new( 104, u16::max_value(), 102, u16::max_value(), 104, u16::max_value(), 102, u16::max_value() ); - assert_eq!(r, __msa_adds_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_adds_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_adds_u_w() { #[rustfmt::skip] - let a = u32x4(100, u32::max_value(), 100, u32::max_value()); + let a = u32x4::new(100, u32::max_value(), 100, u32::max_value()); #[rustfmt::skip] - let b = u32x4(4, 3, 2, 1); - let r = u32x4(104, u32::max_value(), 102, u32::max_value()); + let b = u32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = u32x4::new(104, u32::max_value(), 102, u32::max_value()); - assert_eq!(r, __msa_adds_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_adds_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_adds_u_d() { #[rustfmt::skip] - let a = u64x2(100, u64::max_value()); + let a = u64x2::new(100, u64::max_value()); #[rustfmt::skip] - let b = u64x2(4, 3); - let r = u64x2(104, u64::max_value()); + let b = u64x2::new(4, 3); + #[rustfmt::skip] + let r = u64x2::new(104, u64::max_value()); - assert_eq!(r, __msa_adds_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_adds_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_addv_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 100, i8::min_value(), 100, i8::max_value(), 100, i8::min_value(), 100, i8::max_value(), 100, i8::min_value(), 100, i8::max_value(), 100, i8::min_value(), 100, i8::max_value() ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -4, -3, -2, 100, -4, -3, -2, 100, -4, -3, -2, 100, -4, -3, -2, 100 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 96, 125, 98, -29, 96, 125, 98, -29, 96, 125, 98, -29, 96, 125, 98, -29 ); - assert_eq!(r, __msa_addv_b(a, b)); + assert_eq!( + r, + 
::mem::transmute(__msa_addv_b(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_addv_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 100, i16::min_value(), 100, i16::max_value(), 100, i16::min_value(), 100, i16::max_value() ); #[rustfmt::skip] - let b = i16x8(-4, -3, -2, 1, -4, -3, -2, 1); - let r = i16x8(96, 32765, 98, -32768, 96, 32765, 98, -32768); + let b = i16x8::new(-4, -3, -2, 1, -4, -3, -2, 1); + #[rustfmt::skip] + let r = i16x8::new(96, 32765, 98, -32768, 96, 32765, 98, -32768); - assert_eq!(r, __msa_addv_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_addv_h(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_addv_w() { #[rustfmt::skip] - let a = i32x4(100, i32::max_value(), 100, i32::min_value()); + let a = i32x4::new(100, i32::max_value(), 100, i32::min_value()); + #[rustfmt::skip] + let b = i32x4::new(-4, 3, -2, -1); #[rustfmt::skip] - let b = i32x4(-4, 3, -2, -1); - let r = i32x4(96, -2147483646, 98, 2147483647); + let r = i32x4::new(96, -2147483646, 98, 2147483647); - assert_eq!(r, __msa_addv_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_addv_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_addv_d() { #[rustfmt::skip] - let a = i64x2(100, i64::min_value()); + let a = i64x2::new(100, i64::min_value()); + #[rustfmt::skip] + let b = i64x2::new(-4, -3); #[rustfmt::skip] - let b = i64x2(-4, -3); - let r = i64x2(96, 9223372036854775805); + let r = i64x2::new(96, 9223372036854775805); - assert_eq!(r, __msa_addv_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_addv_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_addvi_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 100, i8::max_value(), 100, i8::max_value(), 100, i8::max_value(), 100, i8::max_value(), 100, i8::max_value(), 100, i8::max_value(), 100, i8::max_value(), 100, i8::max_value() ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 103, -126, 103, -126, 103, -126, 103, -126, 103, -126, 103, -126, 103, -126, 103, -126 ); - assert_eq!(r, __msa_addvi_b(a, 67)); + assert_eq!(r, ::mem::transmute(__msa_addvi_b(::mem::transmute(a), 67))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_addvi_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 3276, -100, -127, i16::max_value(), 3276, -100, -127 ); - let r = i16x8( + #[rustfmt::skip] + let r = i16x8::new( -32766, 3279, -97, -124, -32766, 3279, -97, -124 ); - assert_eq!(r, __msa_addvi_h(a, 67)); + assert_eq!(r, ::mem::transmute(__msa_addvi_h(::mem::transmute(a), 67))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_addvi_w() { #[rustfmt::skip] - let a = i32x4(100, i32::max_value(), 100, i32::min_value()); - let r = i32x4(103, -2147483646, 103, -2147483645); + let a = i32x4::new(100, i32::max_value(), 100, i32::min_value()); + #[rustfmt::skip] + let r = i32x4::new(103, -2147483646, 103, -2147483645); - assert_eq!(r, __msa_addvi_w(a, 67)); + assert_eq!(r, ::mem::transmute(__msa_addvi_w(::mem::transmute(a), 67))); } #[simd_test(enable = "msa")] unsafe fn test_msa_addvi_d() { #[rustfmt::skip] - let a = i64x2(100, i64::min_value()); + let a = i64x2::new(100, i64::min_value()); #[rustfmt::skip] - let r = i64x2(117, -9223372036854775791); + let r = 
i64x2::new(117, -9223372036854775791); - assert_eq!(r, __msa_addvi_d(a, 17)); + assert_eq!(r, ::mem::transmute(__msa_addvi_d(::mem::transmute(a), 17))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_and_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value() ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 4, 3, 2, 100, 4, 3, 2, 100, 4, 3, 2, 100, 4, 3, 2, 100 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 4, 3, 0, 100, 4, 3, 0, 100, 4, 3, 0, 100, 4, 3, 0, 100 ); - assert_eq!(r, __msa_and_v(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_and_v(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_andi_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value(), 100, u8::max_value() ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5 ); - assert_eq!(r, __msa_andi_b(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_andi_b(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 ); - assert_eq!(r, __msa_asub_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_s_h() { #[rustfmt::skip] - let a = i16x8(-1, -2, -3, -4, -1, -2, -3, -4); + let a = i16x8::new(-1, -2, -3, -4, -1, -2, -3, -4); #[rustfmt::skip] - let b = i16x8(-6, -7, -8, -9, -6, -7, -8, -9); - let r = i16x8(5, 5, 5, 5, 5, 5, 5, 5); + let b = i16x8::new(-6, -7, -8, -9, -6, -7, -8, -9); + #[rustfmt::skip] + let r = i16x8::new(5, 5, 5, 5, 5, 5, 5, 5); - assert_eq!(r, __msa_asub_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_s_w() { #[rustfmt::skip] - let a = i32x4(-1, -2, -3, -4); + let a = i32x4::new(-1, -2, -3, -4); #[rustfmt::skip] - let b = i32x4(-6, -7, -8, -9); - let r = i32x4(5, 5, 5, 5); + let b = i32x4::new(-6, -7, -8, -9); + #[rustfmt::skip] + let r = i32x4::new(5, 5, 5, 5); - assert_eq!(r, __msa_asub_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_s_d() { #[rustfmt::skip] - let a = i64x2(-1, -2); + let a = i64x2::new(-1, -2); #[rustfmt::skip] - let b = i64x2(-6, -7); - let r = i64x2(5, 5); + let b = i64x2::new(-6, -7); + #[rustfmt::skip] + let r = i64x2::new(5, 5); - assert_eq!(r, __msa_asub_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_asub_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 
3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 ); - assert_eq!(r, __msa_asub_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_u_h() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(5, 5, 5, 5, 5, 5, 5, 5); + let r = u16x8::new(5, 5, 5, 5, 5, 5, 5, 5); - assert_eq!(r, __msa_asub_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_u_w() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4::new(6, 7, 8, 9); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(5, 5, 5, 5); + let r = u32x4::new(5, 5, 5, 5); - assert_eq!(r, __msa_asub_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_asub_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); + #[rustfmt::skip] + let b = u64x2::new(6, 7); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(5, 5); + let r = u64x2::new(5, 5); - assert_eq!(r, __msa_asub_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_asub_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 6, -7, 8, -9, 6, -7, 8, -9, 6, -7, 8, -9, 6, -7, 8, -9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 2, -5, 2, -7, 2, -5, 2, -7, 2, -5, 2, -7, 2, -5, 2, -7 ); - assert_eq!(r, __msa_ave_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_s_h() { #[rustfmt::skip] - let a = i16x8(-1, -2, -3, -4, -1, -2, -3, -4); + let a = i16x8::new(-1, -2, -3, -4, -1, -2, -3, -4); + #[rustfmt::skip] + let b = i16x8::new(6, -7, 8, -9, 6, -7, 8, -9); #[rustfmt::skip] - let b = i16x8(6, -7, 8, -9, 6, -7, 8, -9); - let r = i16x8(2, -5, 2, -7, 2, -5, 2, -7); + let r = i16x8::new(2, -5, 2, -7, 2, -5, 2, -7); - assert_eq!(r, __msa_ave_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_s_w() { #[rustfmt::skip] - let a = i32x4(-1, -2, -3, -4); + let a = i32x4::new(-1, -2, -3, -4); + #[rustfmt::skip] + let b = i32x4::new(6, -7, 8, -9); #[rustfmt::skip] - let b = i32x4(6, -7, 8, -9); - let r = i32x4(2, -5, 2, -7); + let r = i32x4::new(2, -5, 2, -7); - assert_eq!(r, __msa_ave_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_s_d() { #[rustfmt::skip] - let a = i64x2(-1, -2); + let a = i64x2::new(-1, -2); + #[rustfmt::skip] + let b = i64x2::new(-6, -7); #[rustfmt::skip] - let b = i64x2(-6, -7); - let r = i64x2(-4, -5); + 
let r = i64x2::new(-4, -5); - assert_eq!(r, __msa_ave_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ave_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6 ); - assert_eq!(r, __msa_ave_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_u_h() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(3, 4, 5, 6, 3, 4, 5, 6); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(3, 4, 5, 6, 3, 4, 5, 6); - assert_eq!(r, __msa_ave_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_u_w() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(3, 4, 5, 6); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(3, 4, 5, 6); - assert_eq!(r, __msa_ave_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ave_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(3, 4); + let b = u64x2::new(6, 7); + #[rustfmt::skip] + let r = u64x2::new(3, 4); - assert_eq!(r, __msa_ave_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ave_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -1, -2, 3, -4, -1, -2, 3, -4, -1, -2, 3, -4, -1, -2, 3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, 7, -8, -9, -6, 7, -8, -9, -6, 7, -8, -9, -6, 7, -8, -9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -3, 3, -2, -6, -3, 3, -2, -6, -3, 3, -2, -6, -3, 3, -2, -6 ); - assert_eq!(r, __msa_aver_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_s_h() { #[rustfmt::skip] - let a = i16x8(-1, -2, 3, -4, -1, -2, 3, -4); + let a = i16x8::new(-1, -2, 3, -4, -1, -2, 3, -4); #[rustfmt::skip] - let b = i16x8(-6, 7, -8, -9, -6, 7, -8, -9); - let r = i16x8(-3, 3, -2, -6, -3, 3, -2, -6); + let b = i16x8::new(-6, 7, -8, -9, -6, 7, -8, -9); + #[rustfmt::skip] + let r = i16x8::new(-3, 3, -2, -6, -3, 3, -2, -6); - assert_eq!(r, __msa_aver_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_s_w() { #[rustfmt::skip] - let a = i32x4(-1, -2, 3, -4); + let a = i32x4::new(-1, -2, 3, -4); #[rustfmt::skip] - let b = i32x4(-6, 7, -8, -9); - let r = i32x4(-3, 3, -2, -6); + let b = i32x4::new(-6, 7, -8, -9); + #[rustfmt::skip] + let r = i32x4::new(-3, 3, -2, -6); - assert_eq!(r, __msa_aver_s_w(a, b)); 
+ assert_eq!( + r, + ::mem::transmute(__msa_aver_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_s_d() { #[rustfmt::skip] - let a = i64x2(-1, -2); + let a = i64x2::new(-1, -2); #[rustfmt::skip] - let b = i64x2(-6, -7); - let r = i64x2(-3, -4); + let b = i64x2::new(-6, -7); + #[rustfmt::skip] + let r = i64x2::new(-3, -4); - assert_eq!(r, __msa_aver_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7 ); - assert_eq!(r, __msa_aver_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_u_h() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(4, 5, 6, 7, 4, 5, 6, 7); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(4, 5, 6, 7, 4, 5, 6, 7); - assert_eq!(r, __msa_aver_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_u_w() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(4, 5, 6, 7); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(4, 5, 6, 7); - assert_eq!(r, __msa_aver_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_aver_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(4, 5); + let b = u64x2::new(6, 7); + #[rustfmt::skip] + let r = u64x2::new(4, 5); - assert_eq!(r, __msa_aver_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_aver_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclr_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 191, 27, 54, 1, 191, 27, 54, 1, 191, 27, 54, 1, 191, 27, 54, 1 ); - assert_eq!(r, __msa_bclr_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bclr_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclr_h() { #[rustfmt::skip] - let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + let a = u16x8::new(255, 155, 55, 1, 255, 155, 55, 1); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(191, 27, 55, 1, 191, 27, 55, 1); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(191, 27, 55, 1, 191, 27, 55, 1); - assert_eq!(r, __msa_bclr_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bclr_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe 
fn test_msa_bclr_w() { #[rustfmt::skip] - let a = u32x4(255, 155, 55, 1); + let a = u32x4::new(255, 155, 55, 1); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(191, 27, 55, 1); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(191, 27, 55, 1); - assert_eq!(r, __msa_bclr_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bclr_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclr_d() { #[rustfmt::skip] - let a = u64x2(255, 155); + let a = u64x2::new(255, 155); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(191, 27); + let b = u64x2::new(6, 7); + #[rustfmt::skip] + let r = u64x2::new(191, 27); - assert_eq!(r, __msa_bclr_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bclr_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclri_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 247, 147, 55, 1, 247, 147, 55, 1, 247, 147, 55, 1, 247, 147, 55, 1 ); - assert_eq!(r, __msa_bclri_b(a, 3)); + assert_eq!(r, ::mem::transmute(__msa_bclri_b(::mem::transmute(a), 3))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclri_h() { #[rustfmt::skip] - let a = u16x8(2155, 1155, 155, 1, 2155, 1155, 155, 1); - let r = u16x8(107, 1155, 155, 1, 107, 1155, 155, 1); + let a = u16x8::new(2155, 1155, 155, 1, 2155, 1155, 155, 1); + #[rustfmt::skip] + let r = u16x8::new(107, 1155, 155, 1, 107, 1155, 155, 1); - assert_eq!(r, __msa_bclri_h(a, 11)); + assert_eq!(r, ::mem::transmute(__msa_bclri_h(::mem::transmute(a), 11))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclri_w() { #[rustfmt::skip] - let a = u32x4(211111155, 111111155, 11111155, 1); - let r = u32x4(202722547, 102722547, 2722547, 1); + let a = u32x4::new(211111155, 111111155, 11111155, 1); + #[rustfmt::skip] + let r = u32x4::new(202722547, 102722547, 2722547, 1); - assert_eq!(r, __msa_bclri_w(a, 23)); + assert_eq!(r, ::mem::transmute(__msa_bclri_w(::mem::transmute(a), 23))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bclri_d() { #[rustfmt::skip] - let a = u64x2(211111111155, 11111111111111155); - let r = u64x2(73672157683, 11110973672157683); + let a = u64x2::new(211111111155, 11111111111111155); + #[rustfmt::skip] + let r = u64x2::new(73672157683, 11110973672157683); - assert_eq!(r, __msa_bclri_d(a, 37)); + assert_eq!(r, ::mem::transmute(__msa_bclri_d(::mem::transmute(a), 37))); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsl_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 1, 3, 5, 9, 1, 3, 5, 9, 1, 3, 5, 9, 1, 3, 5, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 63, 11, 11, 1, 63, 11, 11, 1, 63, 11, 11, 1, 63, 11, 11, 1 ); - assert_eq!(r, __msa_binsl_b(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsl_b( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsl_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 32767, 16384, 8192, 4096, 32767, 16384, 8192, 4096 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 21656, 5273, 7081, 2985, 21656, 5273, 7081, 2985 ); #[rustfmt::skip] - let c 
= u16x8( + let c = u16x8::new( 3, 7, 9, 13, 15, 17, 21, 23 ); - let r = u16x8( + #[rustfmt::skip] + let r = u16x8::new( 24575, 5120, 7040, 2984, 21656, 0, 6144, 2816 ); - assert_eq!(r, __msa_binsl_h(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsl_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsl_w() { #[rustfmt::skip] - let a = u32x4(2147483647, 536870912, 67108864, 8388608); + let a = u32x4::new(2147483647, 536870912, 67108864, 8388608); #[rustfmt::skip] - let b = u32x4(1036372536, 259093134, 78219975, 1119499719); + let b = u32x4::new(1036372536, 259093134, 78219975, 1119499719); #[rustfmt::skip] - let c = u32x4(11, 15, 31, 37); - let r = u32x4(1037041663, 259063808, 78219975, 1082130432); + let c = u32x4::new(11, 15, 31, 37); + #[rustfmt::skip] + let r = u32x4::new(1037041663, 259063808, 78219975, 1082130432); - assert_eq!(r, __msa_binsl_w(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsl_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsl_d() { #[rustfmt::skip] - let a = u64x2(8006399338, 2882303762); + let a = u64x2::new(8006399338, 2882303762); #[rustfmt::skip] - let b = u64x2(9223372036854775805, 536870912); + let b = u64x2::new(9223372036854775805, 536870912); #[rustfmt::skip] - let c = u64x2(12, 48); - let r = u64x2(9221120245047489898, 536901394); + let c = u64x2::new(12, 48); + #[rustfmt::skip] + let r = u64x2::new(9221120245047489898, 536901394); - assert_eq!(r, __msa_binsl_d(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsl_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsli_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 7, 7, 11, 9, 7, 7, 11, 9, 7, 7, 11, 9, 7, 7, 11, 9 ); - assert_eq!(r, __msa_binsli_b(a, b, 5)); + assert_eq!( + r, + ::mem::transmute(__msa_binsli_b(::mem::transmute(a), ::mem::transmute(b), 5)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsli_h() { - #[rustfmt::skip] - let a = u16x8( + #[rustfmt::skip] + let a = u16x8::new( 32767, 16384, 8192, 4096, 32767, 16384, 8192, 4096 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 21656, 5273, 7081, 2985, 21656, 5273, 7081, 2985 ); - let r = u16x8( + #[rustfmt::skip] + let r = u16x8::new( 21659, 5272, 7080, 2984, 21659, 5272, 7080, 2984 ); - assert_eq!(r, __msa_binsli_h(a, b, 13)); + assert_eq!( + r, + ::mem::transmute(__msa_binsli_h(::mem::transmute(a), ::mem::transmute(b), 13)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsli_w() { - #[rustfmt::skip] - let a = u32x4(2147483647, 536870912, 67108864, 8388608); #[rustfmt::skip] - let b = u32x4(1036372536, 259093134, 78219975, 1119499719); - let r = u32x4(1036386303, 259080192, 78217216, 1119485952); + let a = u32x4::new(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4::new(1036372536, 259093134, 78219975, 1119499719); + #[rustfmt::skip] + let r = u32x4::new(1036386303, 259080192, 78217216, 1119485952); - assert_eq!(r, __msa_binsli_w(a, b, 17)); + assert_eq!( + r, + ::mem::transmute(__msa_binsli_w(::mem::transmute(a), 
::mem::transmute(b), 17)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsli_d() { - #[rustfmt::skip] - let a = u64x2(8006399338, 2882303762); #[rustfmt::skip] - let b = u64x2(9223372036854775805, 536870912); - let r = u64x2(9223372036854773098, 536901394); + let a = u64x2::new(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2::new(9223372036854775805, 536870912); + #[rustfmt::skip] + let r = u64x2::new(9223372036854773098, 536901394); - assert_eq!(r, __msa_binsli_d(a, b, 48)); + assert_eq!( + r, + ::mem::transmute(__msa_binsli_d(::mem::transmute(a), ::mem::transmute(b), 48)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsr_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 1, 3, 5, 9, 1, 3, 5, 9, 1, 3, 5, 9, 1, 3, 5, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 254, 151, 8, 1, 254, 151, 8, 1, 254, 151, 8, 1, 254, 151, 8, 1 ); - assert_eq!(r, __msa_binsr_b(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsr_b( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsr_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 32767, 16384, 8192, 4096, 32767, 16384, 8192, 4096 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 21656, 5273, 7081, 2985, 21656, 5273, 7081, 2985 ); #[rustfmt::skip] - let c = u16x8( + let c = u16x8::new( 3, 7, 9, 13, 15, 17, 21, 23 ); - let r = u16x8( + #[rustfmt::skip] + let r = u16x8::new( 32760, 16537, 9129, 2985, 21656, 16385, 8233, 4265 ); - assert_eq!(r, __msa_binsr_h(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsr_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsr_w() { #[rustfmt::skip] - let a = u32x4(2147483647, 536870912, 67108864, 8388608); + let a = u32x4::new(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4::new(1036372536, 259093134, 78219975, 1119499719); #[rustfmt::skip] - let b = u32x4(1036372536, 259093134, 78219975, 1119499719); + let c = u32x4::new(11, 15, 31, 37); #[rustfmt::skip] - let c = u32x4(11, 15, 31, 37); - let r = u32x4(2147482168, 536900238, 78219975, 8388615); + let r = u32x4::new(2147482168, 536900238, 78219975, 8388615); - assert_eq!(r, __msa_binsr_w(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsr_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsr_d() { #[rustfmt::skip] - let a = u64x2(8006399338, 2882303762); + let a = u64x2::new(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2::new(9223372036854775805, 536870912); #[rustfmt::skip] - let b = u64x2(9223372036854775805, 536870912); + let c = u64x2::new(12, 48); #[rustfmt::skip] - let c = u64x2(12, 48); - let r = u64x2(8006402045, 536870912); + let r = u64x2::new(8006402045, 536870912); - assert_eq!(r, __msa_binsr_d(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_binsr_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsri_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b 
= u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 198, 135, 8, 9, 198, 135, 8, 9, 198, 135, 8, 9, 198, 135, 8, 9 ); - assert_eq!(r, __msa_binsri_b(a, b, 5)); + assert_eq!( + r, + ::mem::transmute(__msa_binsri_b(::mem::transmute(a), ::mem::transmute(b), 5)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsri_h() { - #[rustfmt::skip] - let a = u16x8( + #[rustfmt::skip] + let a = u16x8::new( 32767, 16384, 8192, 4096, 32767, 16384, 8192, 4096 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 21656, 5273, 7081, 2985, 21656, 5273, 7081, 2985 ); - let r = u16x8( + #[rustfmt::skip] + let r = u16x8::new( 21656, 21657, 7081, 2985, 21656, 21657, 7081, 2985 ); - assert_eq!(r, __msa_binsri_h(a, b, 13)); + assert_eq!( + r, + ::mem::transmute(__msa_binsri_h(::mem::transmute(a), ::mem::transmute(b), 13)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsri_w() { - #[rustfmt::skip] - let a = u32x4(2147483647, 536870912, 67108864, 8388608); #[rustfmt::skip] - let b = u32x4(1036372536, 259093134, 78219975, 1119499719); - let r = u32x4(2147338808, 536965774, 67209927, 8533447); + let a = u32x4::new(2147483647, 536870912, 67108864, 8388608); + #[rustfmt::skip] + let b = u32x4::new(1036372536, 259093134, 78219975, 1119499719); + #[rustfmt::skip] + let r = u32x4::new(2147338808, 536965774, 67209927, 8533447); - assert_eq!(r, __msa_binsri_w(a, b, 17)); + assert_eq!( + r, + ::mem::transmute(__msa_binsri_w(::mem::transmute(a), ::mem::transmute(b), 17)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_binsri_d() { - #[rustfmt::skip] - let a = u64x2(8006399338, 2882303762); #[rustfmt::skip] - let b = u64x2(9223372036854775805, 536870912); - let r = u64x2(562949953421309, 536870912); + let a = u64x2::new(8006399338, 2882303762); + #[rustfmt::skip] + let b = u64x2::new(9223372036854775805, 536870912); + #[rustfmt::skip] + let r = u64x2::new(562949953421309, 536870912); - assert_eq!(r, __msa_binsri_d(a, b, 48)); + assert_eq!( + r, + ::mem::transmute(__msa_binsri_d(::mem::transmute(a), ::mem::transmute(b), 48)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bmnz_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 254, 159, 48, 1, 254, 159, 48, 1, 254, 159, 48, 1, 254, 159, 48, 1 ); - assert_eq!(r, __msa_bmnz_v(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_bmnz_v( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bmnzi_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 249, 159, 51, 7, 249, 159, 51, 7, 249, 159, 51, 7, 249, 159, 51, 7 ); - assert_eq!(r, __msa_bmnzi_b(a, b, 7)); + assert_eq!( + r, + ::mem::transmute(__msa_bmnzi_b(::mem::transmute(a), 
::mem::transmute(b), 7)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bmz_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 7, 3, 15, 9, 7, 3, 15, 9, 7, 3, 15, 9, 7, 3, 15, 9 ); - assert_eq!(r, __msa_bmz_v(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_bmz_v( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bmzi_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1, u8::max_value(), 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 7, 251, 159, 49, 7, 251, 159, 49, 7, 251, 159, 49, 7, 251, 159, 49 ); - assert_eq!(r, __msa_bmzi_b(a, b, 7)); + assert_eq!( + r, + ::mem::transmute(__msa_bmzi_b(::mem::transmute(a), ::mem::transmute(b), 7)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bneg_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 191, 27, 54, 3, 191, 27, 54, 3, 191, 27, 54, 3, 191, 27, 54, 3 ); - assert_eq!(r, __msa_bneg_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bneg_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bneg_h() { #[rustfmt::skip] - let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + let a = u16x8::new(255, 155, 55, 1, 255, 155, 55, 1); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(191, 27, 311, 513, 191, 27, 311, 513); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(191, 27, 311, 513, 191, 27, 311, 513); - assert_eq!(r, __msa_bneg_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bneg_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bneg_w() { #[rustfmt::skip] - let a = u32x4(255, 155, 55, 1); + let a = u32x4::new(255, 155, 55, 1); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(191, 27, 311, 513); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(191, 27, 311, 513); - assert_eq!(r, __msa_bneg_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bneg_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bneg_d() { #[rustfmt::skip] - let a = u64x2(255, 155); + let a = u64x2::new(255, 155); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(191, 27); + let b = u64x2::new(6, 7); + #[rustfmt::skip] + let r = u64x2::new(191, 27); - assert_eq!(r, __msa_bneg_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bneg_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnegi_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 50, 100, 127, 
u8::max_value(), 50, 100, 127, u8::max_value(), 50, 100, 127, u8::max_value(), 50, 100, 127, u8::max_value() ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 34, 116, 111, 239, 34, 116, 111, 239, 34, 116, 111, 239, 34, 116, 111, 239 ); - assert_eq!(r, __msa_bnegi_b(a, 4)); + assert_eq!(r, ::mem::transmute(__msa_bnegi_b(::mem::transmute(a), 4))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnegi_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 32767, 3276, 100, 127, 32767, 3276, 100, 127 ); - let r = u16x8(30719, 1228, 2148, 2175, 30719, 1228, 2148, 2175); + #[rustfmt::skip] + let r = u16x8::new( + 30719, 1228, 2148, 2175, + 30719, 1228, 2148, 2175 + ); - assert_eq!(r, __msa_bnegi_h(a, 11)); + assert_eq!(r, ::mem::transmute(__msa_bnegi_h(::mem::transmute(a), 11))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnegi_w() { #[rustfmt::skip] - let a = u32x4(100, 2147483647, 100, 2147483648); - let r = u32x4(16777316, 2130706431, 16777316, 2164260864); + let a = u32x4::new(100, 2147483647, 100, 2147483648); + #[rustfmt::skip] + let r = u32x4::new(16777316, 2130706431, 16777316, 2164260864); - assert_eq!(r, __msa_bnegi_w(a, 24)); + assert_eq!(r, ::mem::transmute(__msa_bnegi_w(::mem::transmute(a), 24))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bnegi_d() { #[rustfmt::skip] - let a = u64x2(100, 9223372036854775808); + let a = u64x2::new(100, 9223372036854775808); #[rustfmt::skip] - let r = u64x2(4398046511204, 9223376434901286912); + let r = u64x2::new(4398046511204, 9223376434901286912); - assert_eq!(r, __msa_bnegi_d(a, 42)); + assert_eq!(r, ::mem::transmute(__msa_bnegi_d(::mem::transmute(a), 42))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnz_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, @@ -10598,44 +10996,44 @@ mod tests { ); let r = 0 as i32; - assert_eq!(r, __msa_bnz_b(a)); + assert_eq!(r, ::mem::transmute(__msa_bnz_b(::mem::transmute(a)))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnz_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 32767, 3276, 100, 127, 32767, 0, 100, 127 ); let r = 0 as i32; - assert_eq!(r, __msa_bnz_h(a)); + assert_eq!(r, ::mem::transmute(__msa_bnz_h(::mem::transmute(a)))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnz_w() { #[rustfmt::skip] - let a = u32x4(100, 2147483647, 0, 2147483648); + let a = u32x4::new(100, 2147483647, 0, 2147483648); let r = 0 as i32; - assert_eq!(r, __msa_bnz_w(a)); + assert_eq!(r, ::mem::transmute(__msa_bnz_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bnz_d() { #[rustfmt::skip] - let a = u64x2(100, 9223372036854775808); + let a = u64x2::new(100, 9223372036854775808); #[rustfmt::skip] let r = 1 as i32; - assert_eq!(r, __msa_bnz_d(a)); + assert_eq!(r, ::mem::transmute(__msa_bnz_d(::mem::transmute(a)))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_bnz_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10643,162 +11041,209 @@ mod tests { ); let r = 1 as i32; - assert_eq!(r, __msa_bnz_v(a)); + assert_eq!(r, ::mem::transmute(__msa_bnz_v(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bsel_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1 ); 
#[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); - let r = u8x16(7, 3, 15, 9, 7, 3, 15, 9, 7, 3, 15, 9, 7, 3, 15, 9); + #[rustfmt::skip] + let r = u8x16::new( + 7, 3, 15, 9, + 7, 3, 15, 9, + 7, 3, 15, 9, + 7, 3, 15, 9 + ); - assert_eq!(r, __msa_bsel_v(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_bsel_v( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bseli_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16(121, 29, 57, 9, 121, 29, 57, 9, 121, 29, 57, 9, 121, 29, 57, 9); + #[rustfmt::skip] + let r = u8x16::new( + 121, 29, 57, 9, + 121, 29, 57, 9, + 121, 29, 57, 9, + 121, 29, 57, 9 + ); - assert_eq!(r, __msa_bseli_b(a, b, 121)); + assert_eq!( + r, + ::mem::transmute(__msa_bseli_b(::mem::transmute(a), ::mem::transmute(b), 121)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bset_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16(255, 155, 55, 3, 255, 155, 55, 3, 255, 155, 55, 3, 255, 155, 55, 3); + #[rustfmt::skip] + let r = u8x16::new( + 255, 155, 55, 3, + 255, 155, 55, 3, + 255, 155, 55, 3, + 255, 155, 55, 3 + ); - assert_eq!(r, __msa_bset_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bset_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bset_h() { #[rustfmt::skip] - let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); + let a = u16x8::new(255, 155, 55, 1, 255, 155, 55, 1); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(255, 155, 311, 513, 255, 155, 311, 513); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(255, 155, 311, 513, 255, 155, 311, 513); - assert_eq!(r, __msa_bset_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bset_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bset_w() { #[rustfmt::skip] - let a = u32x4(255, 155, 55, 1); + let a = u32x4::new(255, 155, 55, 1); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(255, 155, 311, 513); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(255, 155, 311, 513); - assert_eq!(r, __msa_bset_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bset_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bset_d() { #[rustfmt::skip] - let a = u64x2(255, 155); + let a = u64x2::new(255, 155); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(255, 155); + let b = u64x2::new(6, 7); + #[rustfmt::skip] + let r = u64x2::new(255, 155); - assert_eq!(r, __msa_bset_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_bset_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_bseti_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 255, 159, 
55, 5, 255, 159, 55, 5, 255, 159, 55, 5, 255, 159, 55, 5 ); - assert_eq!(r, __msa_bseti_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_bseti_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bseti_h() { #[rustfmt::skip] - let a = u16x8(255, 155, 55, 1, 255, 155, 55, 1); - let r = u16x8(255, 159, 55, 5, 255, 159, 55, 5); + let a = u16x8::new(255, 155, 55, 1, 255, 155, 55, 1); + #[rustfmt::skip] + let r = u16x8::new(255, 159, 55, 5, 255, 159, 55, 5); - assert_eq!(r, __msa_bseti_h(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_bseti_h(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bseti_w() { #[rustfmt::skip] - let a = u32x4(255, 155, 55, 1); - let r = u32x4(255, 159, 55, 5); + let a = u32x4::new(255, 155, 55, 1); + #[rustfmt::skip] + let r = u32x4::new(255, 159, 55, 5); - assert_eq!(r, __msa_bseti_w(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_bseti_w(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bseti_d() { #[rustfmt::skip] - let a = u64x2(255, 155); - let r = u64x2(255, 159); + let a = u64x2::new(255, 155); + #[rustfmt::skip] + let r = u64x2::new(255, 159); - assert_eq!(r, __msa_bseti_d(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_bseti_d(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bz_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 255, 155, 55, 1, 255, 155, 55, 1, 255, 155, 55, 1, @@ -10806,40 +11251,40 @@ mod tests { ); let r = 0 as i32; - assert_eq!(r, __msa_bz_b(a)); + assert_eq!(r, ::mem::transmute(__msa_bz_b(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bz_h() { #[rustfmt::skip] - let a = u16x8(0, 0, 0, 0, 0, 0, 0, 0); + let a = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); let r = 1 as i32; - assert_eq!(r, __msa_bz_h(a)); + assert_eq!(r, ::mem::transmute(__msa_bz_h(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bz_w() { #[rustfmt::skip] - let a = u32x4(255, 0, 55, 1); + let a = u32x4::new(255, 0, 55, 1); let r = 1 as i32; - assert_eq!(r, __msa_bz_w(a)); + assert_eq!(r, ::mem::transmute(__msa_bz_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bz_d() { #[rustfmt::skip] - let a = u64x2(255, 0); + let a = u64x2::new(255, 0); let r = 1 as i32; - assert_eq!(r, __msa_bz_d(a)); + assert_eq!(r, ::mem::transmute(__msa_bz_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_bz_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10847,101 +11292,125 @@ mod tests { ); let r = 1 as i32; - assert_eq!(r, __msa_bz_v(a)); + assert_eq!(r, ::mem::transmute(__msa_bz_v(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ceq_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, 127, 55, 1, -128, 127, 55, 1, -128, 127, 55, 1, -128, 127, 55, 1 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -128, 126, 55, 1, -128, 126, 55, 1, -128, 126, 55, 1, -128, 126, 55, 1 ); - let r = i8x16(-1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1); + #[rustfmt::skip] + let r = i8x16::new( + -1, 0, -1, -1, + -1, 0, -1, -1, + -1, 0, -1, -1, + -1, 0, -1, -1 + ); - assert_eq!(r, __msa_ceq_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ceq_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ceq_h() { #[rustfmt::skip] - let a = i16x8(255, 155, 55, 1, 255, 155, 55, 1); + let a = i16x8::new(255, 155, 55, 1, 255, 155, 55, 1); + 
#[rustfmt::skip] + let b = i16x8::new(255, 155, 56, 1, 255, 155, 56, 1); #[rustfmt::skip] - let b = i16x8(255, 155, 56, 1, 255, 155, 56, 1); - let r = i16x8(-1, -1, 0, -1, -1, -1, 0, -1); + let r = i16x8::new(-1, -1, 0, -1, -1, -1, 0, -1); - assert_eq!(r, __msa_ceq_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ceq_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ceq_w() { #[rustfmt::skip] - let a = i32x4(255, 155, 55, 1); + let a = i32x4::new(255, 155, 55, 1); + #[rustfmt::skip] + let b = i32x4::new(255, 156, 55, 1); #[rustfmt::skip] - let b = i32x4(255, 156, 55, 1); - let r = i32x4(-1, 0, -1, -1); + let r = i32x4::new(-1, 0, -1, -1); - assert_eq!(r, __msa_ceq_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ceq_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ceq_d() { #[rustfmt::skip] - let a = i64x2(255, 155); + let a = i64x2::new(255, 155); + #[rustfmt::skip] + let b = i64x2::new(255, 156); #[rustfmt::skip] - let b = i64x2(255, 156); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_ceq_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ceq_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ceqi_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 100, -1, -4, 15, 100, -1, -4, 15, 100, -1, -4, 15, 100, -1, -4, 15 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0 ); - assert_eq!(r, __msa_ceqi_b(a, -4)); + assert_eq!(r, ::mem::transmute(__msa_ceqi_b(::mem::transmute(a), -4))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ceqi_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 32767, 3276, 100, -11, 32767, 3276, 100, -11 ); - let r = i16x8(0, 0, 0, -1, 0, 0, 0, -1); + #[rustfmt::skip] + let r = i16x8::new(0, 0, 0, -1, 0, 0, 0, -1); - assert_eq!(r, __msa_ceqi_h(a, -11)); + assert_eq!(r, ::mem::transmute(__msa_ceqi_h(::mem::transmute(a), -11))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ceqi_w() { #[rustfmt::skip] - let a = i32x4(1, 3, 5, -3); - let r = i32x4(0, 0, -1, 0); + let a = i32x4::new(1, 3, 5, -3); + #[rustfmt::skip] + let r = i32x4::new(0, 0, -1, 0); - assert_eq!(r, __msa_ceqi_w(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_ceqi_w(::mem::transmute(a), 5))); } // FIXME: https://reviews.llvm.org/D59884 @@ -10950,11 +11419,11 @@ mod tests { // #[simd_test(enable = "msa")] // unsafe fn test_msa_ceqi_d() { // #[rustfmt::skip] - // let a = i64x2(-3, 2); + // let a = i64x2::new(-3, 2); // #[rustfmt::skip] - // let r = i64x2(-1, 0); + // let r = i64x2::new(-1, 0); - // assert_eq!(r, __msa_ceqi_d(a, -3)); + // assert_eq!(r, ::mem::transmute(__msa_ceqi_d(::mem::transmute(a), -3))); // } // Can not be tested in user mode @@ -10962,525 +11431,619 @@ mod tests { // unsafe fn test_msa_cfcmsa() { // let r = 5; - // assert_eq!(r, __msa_cfcmsa(5)); + // assert_eq!(r, ::mem::transmute(__msa_cfcmsa(5))); // } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, 127, 55, 2, -128, 127, 55, 2, -128, 127, 55, 2, -128, 127, 55, 2 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -128, 126, 55, 1, -128, 126, 55, 1, -128, 126, 55, 1, -128, 126, 55, 1 ); - let r = i8x16(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + #[rustfmt::skip] + let r
= i8x16::new( + -1, 0, -1, 0, + -1, 0, -1, 0, + -1, 0, -1, 0, + -1, 0, -1, 0 + ); - assert_eq!(r, __msa_cle_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_s_h() { #[rustfmt::skip] - let a = i16x8(255, 155, 55, 2, 255, 155, 55, 2); + let a = i16x8::new(255, 155, 55, 2, 255, 155, 55, 2); #[rustfmt::skip] - let b = i16x8(255, 155, 56, 1, 255, 155, 56, 1); - let r = i16x8(-1, -1, -1, 0, -1, -1, -1, 0); + let b = i16x8::new(255, 155, 56, 1, 255, 155, 56, 1); + #[rustfmt::skip] + let r = i16x8::new(-1, -1, -1, 0, -1, -1, -1, 0); - assert_eq!(r, __msa_cle_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_s_w() { #[rustfmt::skip] - let a = i32x4(255, 155, 55, 2); + let a = i32x4::new(255, 155, 55, 2); #[rustfmt::skip] - let b = i32x4(255, 156, 55, 1); - let r = i32x4(-1, -1, -1, 0); + let b = i32x4::new(255, 156, 55, 1); + #[rustfmt::skip] + let r = i32x4::new(-1, -1, -1, 0); - assert_eq!(r, __msa_cle_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_s_d() { #[rustfmt::skip] - let a = i64x2(255, 155); + let a = i64x2::new(255, 155); #[rustfmt::skip] - let b = i64x2(255, 156); - let r = i64x2(-1, -1); + let b = i64x2::new(255, 156); + #[rustfmt::skip] + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_cle_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 127, 55, 2, u8::max_value(), 127, 55, 2, u8::max_value(), 127, 55, 2, u8::max_value(), 127, 55, 2 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( u8::max_value(), 126, 55, 1, u8::max_value(), 126, 55, 1, u8::max_value(), 126, 55, 1, u8::max_value(), 126, 55, 1 ); - let r = i8x16(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + #[rustfmt::skip] + let r = i8x16::new(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); - assert_eq!(r, __msa_cle_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_u_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( u16::max_value(), 155, 55, 2, u16::max_value(), 155, 55, 2 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( u16::max_value(), 155, 56, 1, u16::max_value(), 155, 56, 1 ); - let r = i16x8(-1, -1, -1, 0, -1, -1, -1, 0); + #[rustfmt::skip] + let r = i16x8::new(-1, -1, -1, 0, -1, -1, -1, 0); - assert_eq!(r, __msa_cle_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_u_w() { #[rustfmt::skip] - let a = u32x4(u32::max_value(), 155, 55, 2); + let a = u32x4::new(u32::max_value(), 155, 55, 2); #[rustfmt::skip] - let b = u32x4(u32::max_value(), 156, 55, 1); - let r = i32x4(-1, -1, -1, 0); + let b = u32x4::new(u32::max_value(), 156, 55, 1); + #[rustfmt::skip] + let r = i32x4::new(-1, -1, -1, 0); - assert_eq!(r, __msa_cle_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_cle_u_d() { #[rustfmt::skip] - 
let a = u64x2(u64::max_value(), 155); + let a = u64x2::new(u64::max_value(), 155); #[rustfmt::skip] - let b = u64x2(u64::max_value(), 156); - let r = i64x2(-1, -1); + let b = u64x2::new(u64::max_value(), 156); + #[rustfmt::skip] + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_cle_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_cle_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clei_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -2, -127, 100, -127, -2, -127, 100, -127, -2, -127, 100, -127, -2, -127, 100, -127 ); - let r = i8x16(-1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1); + #[rustfmt::skip] + let r = i8x16::new(-1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1); - assert_eq!(r, __msa_clei_s_b(a, -2)); + assert_eq!(r, ::mem::transmute(__msa_clei_s_b(::mem::transmute(a), -2))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clei_s_h() { #[rustfmt::skip] - let a = i16x8( - 32767, 3276, 10, -1, - 32767, 3276, 10, -1, - ); - let r = i16x8(0, 0, 0, -1, 0, 0, 0, -1); + let a = i16x8::new( + 32767, 3276, 10, -1, + 32767, 3276, 10, -1, + ); + #[rustfmt::skip] + let r = i16x8::new(0, 0, 0, -1, 0, 0, 0, -1); - assert_eq!(r, __msa_clei_s_h(a, -1)); + assert_eq!(r, ::mem::transmute(__msa_clei_s_h(::mem::transmute(a), -1))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clei_s_w() { #[rustfmt::skip] - let a = i32x4(100, 2147483647, 6, 2147483647); - let r = i32x4(0, 0, -1, 0); + let a = i32x4::new(100, 2147483647, 6, 2147483647); + #[rustfmt::skip] + let r = i32x4::new(0, 0, -1, 0); - assert_eq!(r, __msa_clei_s_w(a, 6)); + assert_eq!(r, ::mem::transmute(__msa_clei_s_w(::mem::transmute(a), 6))); } // FIXME: https://reviews.llvm.org/D59884 // If target type is i64, negative immediate loses the sign - // -3 is represented as 4294967293 + // -3 is represented as 4294967293 // #[simd_test(enable = "msa")] // unsafe fn test_msa_clei_s_d() { // #[rustfmt::skip] - // let a = i64x2(-3, 11); + // let a = i64x2::new(-3, 11); // #[rustfmt::skip] - // let r = i64x2(-1, 0); + // let r = i64x2::new(-1, 0); - // assert_eq!(r, __msa_clei_s_d(a, -3)); + // assert_eq!(r, ::mem::transmute(__msa_clei_s_d(::mem::transmute(a), -3))); // } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clei_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 2, 127, 100, 127, 2, 127, 100, 127, 2, 127, 100, 127, 2, 127, 100, 127, ); - let r = i8x16(-1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0); + #[rustfmt::skip] + let r = i8x16::new( + -1, 0, 0, 0, + -1, 0, 0, 0, + -1, 0, 0, 0, + -1, 0, 0, 0 + ); - assert_eq!(r, __msa_clei_u_b(a, 25)); + assert_eq!(r, ::mem::transmute(__msa_clei_u_b(::mem::transmute(a), 25))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clei_u_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 1, 26, 15, 36, 1, 26, 15, 36 ); - let r = i16x8(-1, 0, -1, 0, -1, 0, -1, 0); + #[rustfmt::skip] + let r = i16x8::new(-1, 0, -1, 0, -1, 0, -1, 0); - assert_eq!(r, __msa_clei_u_h(a, 25)); + assert_eq!(r, ::mem::transmute(__msa_clei_u_h(::mem::transmute(a), 25))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clei_u_w() { #[rustfmt::skip] - let a = u32x4(25, 32, 25, 32); - let r = i32x4(-1, 0, -1, 0); + let a = u32x4::new(25, 32, 25, 32); + #[rustfmt::skip] + let r = i32x4::new(-1, 0, -1, 0); 
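// A standalone sketch of the immediate-sign problem described in the FIXME
// above (https://reviews.llvm.org/D59884): if a negative 32-bit immediate is
// zero-extended rather than sign-extended when the lane type is i64, -3 turns
// into 4294967293 and the disabled *_d tests would compare against the wrong
// value. `immediate_sign_demo` is a hypothetical illustration, not part of
// this patch.
fn immediate_sign_demo() {
    let imm: i32 = -3;
    let zero_extended = imm as u32 as i64; // sign lost: 4294967293
    let sign_extended = i64::from(imm);    // sign kept: -3
    assert_eq!(zero_extended, 4294967293);
    assert_ne!(zero_extended, sign_extended);
}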
- assert_eq!(r, __msa_clei_u_w(a, 31)); + assert_eq!(r, ::mem::transmute(__msa_clei_u_w(::mem::transmute(a), 31))); } #[simd_test(enable = "msa")] unsafe fn test_msa_clei_u_d() { #[rustfmt::skip] - let a = u64x2(10, 26); + let a = u64x2::new(10, 26); #[rustfmt::skip] - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_clei_u_d(a, 25)); + assert_eq!(r, ::mem::transmute(__msa_clei_u_d(::mem::transmute(a), 25))); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, 127, 55, 2, -128, 127, 55, 2, -128, 127, 55, 2, -128, 127, 55, 2 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -127, 126, 56, 1, -127, 126, 56, 1, -127, 126, 56, 1, -127, 126, 56, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0 ); - assert_eq!(r, __msa_clt_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_s_h() { #[rustfmt::skip] - let a = i16x8(-255, 155, 55, 2, -255, 155, 55, 2); + let a = i16x8::new(-255, 155, 55, 2, -255, 155, 55, 2); + #[rustfmt::skip] + let b = i16x8::new(255, 156, 56, 1, 255, 156, 56, 1); #[rustfmt::skip] - let b = i16x8(255, 156, 56, 1, 255, 156, 56, 1); - let r = i16x8(-1, -1, -1, 0, -1, -1, -1, 0); + let r = i16x8::new(-1, -1, -1, 0, -1, -1, -1, 0); - assert_eq!(r, __msa_clt_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_s_w() { #[rustfmt::skip] - let a = i32x4(-255, 155, 55, 2); + let a = i32x4::new(-255, 155, 55, 2); #[rustfmt::skip] - let b = i32x4(255, 156, 55, 1); - let r = i32x4(-1, -1, 0, 0); + let b = i32x4::new(255, 156, 55, 1); + #[rustfmt::skip] + let r = i32x4::new(-1, -1, 0, 0); - assert_eq!(r, __msa_clt_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_s_d() { #[rustfmt::skip] - let a = i64x2(-255, 155); + let a = i64x2::new(-255, 155); #[rustfmt::skip] - let b = i64x2(255, 156); - let r = i64x2(-1, -1); + let b = i64x2::new(255, 156); + #[rustfmt::skip] + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_clt_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 128, 127, 55, 2, 128, 127, 55, 2, 128, 127, 55, 2, 128, 127, 55, 2 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 127, 126, 56, 1, 127, 126, 56, 1, 127, 126, 56, 1, 127, 126, 56, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0 ); - assert_eq!(r, __msa_clt_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_u_h() { #[rustfmt::skip] - let a = u16x8(255, 155, 55, 2, 255, 155, 55, 2); + let a = u16x8::new(255, 155, 55, 2, 255, 155, 55, 2); #[rustfmt::skip] - let b = u16x8(255, 156, 56, 1, 255, 156, 56, 1); - let r = i16x8(0, -1, -1, 0, 0, -1, -1, 0); + let b = u16x8::new(255, 156, 56, 1, 255, 156, 56, 1); + #[rustfmt::skip] + let r = i16x8::new(0, -1, -1, 0, 0, -1, -1, 0); - assert_eq!(r, __msa_clt_u_h(a, b)); + assert_eq!( + r, + 
::mem::transmute(__msa_clt_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_u_w() { #[rustfmt::skip] - let a = u32x4(255, 155, 55, 2); + let a = u32x4::new(255, 155, 55, 2); #[rustfmt::skip] - let b = u32x4(255, 156, 55, 1); - let r = i32x4(0, -1, 0, 0); + let b = u32x4::new(255, 156, 55, 1); + #[rustfmt::skip] + let r = i32x4::new(0, -1, 0, 0); - assert_eq!(r, __msa_clt_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_clt_u_d() { #[rustfmt::skip] - let a = u64x2(255, 155); + let a = u64x2::new(255, 155); #[rustfmt::skip] - let b = u64x2(255, 156); - let r = i64x2(0, -1); + let b = u64x2::new(255, 156); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_clt_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_clt_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clti_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 2, -127, -5, 127, 2, -127, -5, 127, 2, -127, -5, 127, 2, -127, -5, 127 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0 ); - assert_eq!(r, __msa_clti_s_b(a, -5)); + assert_eq!(r, ::mem::transmute(__msa_clti_s_b(::mem::transmute(a), -5))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clti_s_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( -1024, 3276, 15, 127, -1024, 3276, 15, 127 ); - let r = i16x8(-1, 0, 0, 0, -1, 0, 0, 0); + #[rustfmt::skip] + let r = i16x8::new(-1, 0, 0, 0, -1, 0, 0, 0); - assert_eq!(r, __msa_clti_s_h(a, 15)); + assert_eq!(r, ::mem::transmute(__msa_clti_s_h(::mem::transmute(a), 15))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clti_s_w() { #[rustfmt::skip] - let a = i32x4(-15, 2147483647, -15, 2147483647); - let r = i32x4(-1, 0, -1, 0); + let a = i32x4::new(-15, 2147483647, -15, 2147483647); + #[rustfmt::skip] + let r = i32x4::new(-1, 0, -1, 0); - assert_eq!(r, __msa_clti_s_w(a, -10)); + assert_eq!( + r, + ::mem::transmute(__msa_clti_s_w(::mem::transmute(a), -10)) + ); } // FIXME: https://reviews.llvm.org/D59884 // If target type is i64, negative immediate loses the sign - // -3 is represented as 4294967293 + // -3 is represented as 4294967293 // #[simd_test(enable = "msa")] // unsafe fn test_msa_clti_s_d() { // #[rustfmt::skip] - // let a = i64x2(-5, -2); + // let a = i64x2::new(-5, -2); // #[rustfmt::skip] - // let r = i64x2(-1, 0); + // let r = i64x2::new(-1, 0); - // assert_eq!(r, __msa_clti_s_d(a, -3)); + // assert_eq!(r, ::mem::transmute(__msa_clti_s_d(::mem::transmute(a), -3))); // } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clti_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 2, 127, 49, 127, 2, 127, 49, 127, 2, 127, 49, 127, 2, 127, 49, 127, ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0 ); - assert_eq!(r, __msa_clti_u_b(a, 50)); + assert_eq!(r, ::mem::transmute(__msa_clti_u_b(::mem::transmute(a), 50))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clti_u_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 327, 3276, 100, 127, 327, 3276, 100, 127 ); - let r = i16x8(0, 0, 0, 0, 0, 0, 0, 0); + #[rustfmt::skip] + let r = i16x8::new(0, 0, 0, 0, 0, 0, 
0, 0); - assert_eq!(r, __msa_clti_u_h(a, 30)); + assert_eq!(r, ::mem::transmute(__msa_clti_u_h(::mem::transmute(a), 30))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_clti_u_w() { #[rustfmt::skip] - let a = u32x4(100, 2147483647, 100, 2147483647); - let r = i32x4(0, 0, 0, 0); + let a = u32x4::new(100, 2147483647, 100, 2147483647); + #[rustfmt::skip] + let r = i32x4::new(0, 0, 0, 0); - assert_eq!(r, __msa_clti_u_w(a, 10)); + assert_eq!(r, ::mem::transmute(__msa_clti_u_w(::mem::transmute(a), 10))); } #[simd_test(enable = "msa")] unsafe fn test_msa_clti_u_d() { #[rustfmt::skip] - let a = u64x2(1, 9223372036854775807); + let a = u64x2::new(1, 9223372036854775807); #[rustfmt::skip] - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_clti_u_d(a, 10)); + assert_eq!(r, ::mem::transmute(__msa_clti_u_d(::mem::transmute(a), 10))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_copy_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -100, 127, 4, 127, -100, 127, 4, 127, -100, 127, 4, 127, -100, 127, 4, 127 ); + #[rustfmt::skip] let r = -100 as i32; - assert_eq!(r, __msa_copy_s_b(a, 12)); + assert_eq!(r, ::mem::transmute(__msa_copy_s_b(::mem::transmute(a), 12))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_copy_s_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 32767, 3276, 100, 11, 32767, 3276, 100, 11 ); + #[rustfmt::skip] let r = 32767 as i32; - assert_eq!(r, __msa_copy_s_h(a, 4)); + assert_eq!(r, ::mem::transmute(__msa_copy_s_h(::mem::transmute(a), 4))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_copy_s_w() { #[rustfmt::skip] - let a = i32x4(100, 2147483647, 5, -2147483647); + let a = i32x4::new(100, 2147483647, 5, -2147483647); let r = 2147483647 as i32; - assert_eq!(r, __msa_copy_s_w(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_copy_s_w(::mem::transmute(a), 1))); } #[simd_test(enable = "msa")] unsafe fn test_msa_copy_s_d() { #[rustfmt::skip] - let a = i64x2(3, 9223372036854775807); + let a = i64x2::new(3, 9223372036854775807); #[rustfmt::skip] let r = 9223372036854775807 as i64; - assert_eq!(r, __msa_copy_s_d(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_copy_s_d(::mem::transmute(a), 1))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_copy_u_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 100, 127, 4, 127, 100, 127, 4, 127, 100, 127, 4, 127, 100, 127, 4, 127 ); + #[rustfmt::skip] let r = 100 as u32; - assert_eq!(r, __msa_copy_u_b(a, 12)); + assert_eq!(r, ::mem::transmute(__msa_copy_u_b(::mem::transmute(a), 12))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_copy_u_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 32767, 3276, 100, 11, 32767, 3276, 100, 11 ); + #[rustfmt::skip] let r = 32767 as u32; - assert_eq!(r, __msa_copy_u_h(a, 4)); + assert_eq!(r, ::mem::transmute(__msa_copy_u_h(::mem::transmute(a), 4))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_copy_u_w() { #[rustfmt::skip] - let a = i32x4(100, 2147483647, 5, 2147483647); + let a = i32x4::new(100, 2147483647, 5, 2147483647); + #[rustfmt::skip] let r = 2147483647 as u32; - assert_eq!(r, __msa_copy_u_w(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_copy_u_w(::mem::transmute(a), 1))); } #[simd_test(enable = "msa")] unsafe fn test_msa_copy_u_d() { #[rustfmt::skip] - let a = i64x2(3, i64::max_value()); 
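// A minimal sketch, with plain arrays standing in for the crate's vector
// types, of the `::mem::transmute` round-trip the rewritten asserts rely on:
// both sides of each call are 128-bit plain-data values, so the conversion is
// a bitwise reinterpretation. `transmute_demo` is hypothetical, not part of
// this patch.
fn transmute_demo() {
    let a: [u64; 2] = [3, u64::max_value()];
    // Reinterpret the same 16 bytes as two i64 lanes, as the tests do when
    // feeding portable vectors into the MSA intrinsics.
    let b: [i64; 2] = unsafe { core::mem::transmute(a) };
    assert_eq!(b, [3, -1]); // the all-ones pattern reads back as -1
}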
+ let a = i64x2::new(3, i64::max_value()); #[rustfmt::skip] let r = 9223372036854775807 as u64; - assert_eq!(r, __msa_copy_u_d(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_copy_u_d(::mem::transmute(a), 1))); } // Can not be tested in user mode @@ -11491,447 +12054,607 @@ mod tests { #[simd_test(enable = "msa")] unsafe fn test_msa_div_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9 - ); + ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 6, 3, 2, 2, 6, 3, 2, 2, 6, 3, 2, 2, 6, 3, 2, 2 ); - assert_eq!(r, __msa_div_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_s_h() { #[rustfmt::skip] - let a = i16x8(-6, -7, -8, -9, 6, 7, 8, 9); + let a = i16x8::new(-6, -7, -8, -9, 6, 7, 8, 9); #[rustfmt::skip] - let b = i16x8(-1, -2, -3, -4, -1, -2, -3, -4); - let r = i16x8(6, 3, 2, 2, -6, -3, -2, -2); + let b = i16x8::new(-1, -2, -3, -4, -1, -2, -3, -4); + #[rustfmt::skip] + let r = i16x8::new(6, 3, 2, 2, -6, -3, -2, -2); - assert_eq!(r, __msa_div_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_s_w() { #[rustfmt::skip] - let a = i32x4(-6, -7, 8, 9); - #[rustfmt::skip] - let b = i32x4(-1, -2, -3, -4); - let r = i32x4(6, 3, -2, -2); + let a = i32x4::new(-6, -7, 8, 9); + #[rustfmt::skip] + let b = i32x4::new(-1, -2, -3, -4); + #[rustfmt::skip] + let r = i32x4::new(6, 3, -2, -2); - assert_eq!(r, __msa_div_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_s_d() { #[rustfmt::skip] - let a = i64x2(-6, 7); + let a = i64x2::new(-6, 7); #[rustfmt::skip] - let b = i64x2(-1, -2); - let r = i64x2(6, -3); + let b = i64x2::new(-1, -2); + #[rustfmt::skip] + let r = i64x2::new(6, -3); - assert_eq!(r, __msa_div_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 - ); + ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 6, 3, 2, 2, 6, 3, 2, 2, 6, 3, 2, 2, 6, 3, 2, 2 ); - assert_eq!(r, __msa_div_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_u_h() { #[rustfmt::skip] - let a = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let a = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let b = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8(1, 2, 3, 4, 1, 2, 3, 4); - let r = u16x8(6, 3, 2, 2, 6, 3, 2, 2); + let r = u16x8::new(6, 3, 2, 2, 6, 3, 2, 2); - assert_eq!(r, __msa_div_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_u_w() { #[rustfmt::skip] - let a = u32x4(6, 7, 8, 9); - #[rustfmt::skip] - let b = u32x4(1, 2, 3, 4); - let r = u32x4(6, 3, 2, 2); + let a = u32x4::new(6, 7, 8, 9); + 
#[rustfmt::skip] + let b = u32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = u32x4::new(6, 3, 2, 2); - assert_eq!(r, __msa_div_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_div_u_d() { #[rustfmt::skip] - let a = u64x2(6, 7); + let a = u64x2::new(6, 7); #[rustfmt::skip] - let b = u64x2(1, 2); - let r = u64x2(6, 3); + let b = u64x2::new(1, 2); + #[rustfmt::skip] + let r = u64x2::new(6, 3); - assert_eq!(r, __msa_div_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_div_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dotp_s_h() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -1, -2, -3, 4, -1, -2, -3, -4, -1, -2, -3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9 ); - let r = i16x8(20, -12, 20, 60, 20, -12, 20, 60); + #[rustfmt::skip] + let r = i16x8::new(20, -12, 20, 60, 20, -12, 20, 60); - assert_eq!(r, __msa_dotp_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_dotp_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dotp_s_w() { #[rustfmt::skip] - let a = i16x8(-1, -2, -3, -4, -1, -2, -3, 4); + let a = i16x8::new(-1, -2, -3, -4, -1, -2, -3, 4); + #[rustfmt::skip] + let b = i16x8::new(-6, -7, -8, -9, -6, -7, -8, -9); #[rustfmt::skip] - let b = i16x8(-6, -7, -8, -9, -6, -7, -8, -9); - let r = i32x4(20, 60, 20, -12); + let r = i32x4::new(20, 60, 20, -12); - assert_eq!(r, __msa_dotp_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_dotp_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dotp_s_d() { #[rustfmt::skip] - let a = i32x4(-1, -2, -3, 4); + let a = i32x4::new(-1, -2, -3, 4); + #[rustfmt::skip] + let b = i32x4::new(-6, -7, -8, -9); #[rustfmt::skip] - let b = i32x4(-6, -7, -8, -9); - let r = i64x2(20, -12); + let r = i64x2::new(20, -12); - assert_eq!(r, __msa_dotp_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_dotp_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_dotp_u_h() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u16x8(20, 60, 20, 60, 20, 60, 20, 60); + #[rustfmt::skip] + let r = u16x8::new(20, 60, 20, 60, 20, 60, 20, 60); - assert_eq!(r, __msa_dotp_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_dotp_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dotp_u_w() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u32x4(20, 60, 20, 60); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(20, 60, 20, 60); - assert_eq!(r, __msa_dotp_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_dotp_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dotp_u_d() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u64x2(20, 60); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = 
u64x2::new(20, 60); - assert_eq!(r, __msa_dotp_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_dotp_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpadd_s_h() { #[rustfmt::skip] - let a = i16x8(-1, -2, -3, -4, -1, -2, -3, 4); + let a = i16x8::new(-1, -2, -3, -4, -1, -2, -3, 4); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -1, -2, -3, 4, -1, -2, -3, -4, -1, -2, -3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let c = i8x16( + let c = i8x16::new( -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9 ); - let r = i16x8(19, -14, 17, 56, 19, -14, 17, 64); + #[rustfmt::skip] + let r = i16x8::new(19, -14, 17, 56, 19, -14, 17, 64); - assert_eq!(r, __msa_dpadd_s_h(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpadd_s_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpadd_s_w() { #[rustfmt::skip] - let a = i32x4(-1, -2, -3, -4); + let a = i32x4::new(-1, -2, -3, -4); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( -1, -2, -3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let c = i16x8( + let c = i16x8::new( -6, -7, -8, -9, -6, -7, -8, -9 ); - let r = i32x4(19, -14, 17, 56); + #[rustfmt::skip] + let r = i32x4::new(19, -14, 17, 56); - assert_eq!(r, __msa_dpadd_s_w(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpadd_s_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpadd_s_d() { #[rustfmt::skip] - let a = i64x2(-1, -2); + let a = i64x2::new(-1, -2); + #[rustfmt::skip] + let b = i32x4::new(-1, -2, -3, 4); #[rustfmt::skip] - let b = i32x4(-1, -2, -3, 4); + let c = i32x4::new(-6, -7, -8, -9); #[rustfmt::skip] - let c = i32x4(-6, -7, -8, -9); - let r = i64x2(19, -14); + let r = i64x2::new(19, -14); - assert_eq!(r, __msa_dpadd_s_d(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpadd_s_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpadd_u_h() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u16x8(21, 62, 23, 64, 21, 62, 23, 64); + #[rustfmt::skip] + let r = u16x8::new(21, 62, 23, 64, 21, 62, 23, 64); - assert_eq!(r, __msa_dpadd_u_h(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpadd_u_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpadd_u_w() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let c = u16x8( + let c = u16x8::new( 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u32x4(21, 62, 23, 64); + #[rustfmt::skip] + let r = u32x4::new(21, 62, 23, 64); - assert_eq!(r, __msa_dpadd_u_w(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpadd_u_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpadd_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); + #[rustfmt::skip] + let b = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(1, 2, 3, 4); + let c = u32x4::new(6, 
7, 8, 9); #[rustfmt::skip] - let c = u32x4(6, 7, 8, 9); - let r = u64x2(21, 62); + let r = u64x2::new(21, 62); - assert_eq!(r, __msa_dpadd_u_d(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpadd_u_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpsub_s_h() { #[rustfmt::skip] - let a = i16x8(-1, -2, -3, -4, -1, -2, -3, 4); + let a = i16x8::new(-1, -2, -3, -4, -1, -2, -3, 4); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -1, -2, -3, 4, -1, -2, -3, -4, -1, -2, -3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let c = i8x16( + let c = i8x16::new( -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9, -6, -7, -8, -9 ); - let r = i16x8(-21, 10, -23, -64, -21, 10, -23, -56); + #[rustfmt::skip] + let r = i16x8::new(-21, 10, -23, -64, -21, 10, -23, -56); - assert_eq!(r, __msa_dpsub_s_h(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpsub_s_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpsub_s_w() { #[rustfmt::skip] - let a = i32x4(-1, -2, -3, -4); + let a = i32x4::new(-1, -2, -3, -4); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( -1, -2, -3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let c = i16x8( + let c = i16x8::new( -6, -7, -8, -9, -6, -7, -8, -9 ); - let r = i32x4(-21, 10, -23, -64); + #[rustfmt::skip] + let r = i32x4::new(-21, 10, -23, -64); - assert_eq!(r, __msa_dpsub_s_w(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpsub_s_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpsub_s_d() { #[rustfmt::skip] - let a = i64x2(-1, -2); + let a = i64x2::new(-1, -2); + #[rustfmt::skip] + let b = i32x4::new(-1, -2, -3, 4); #[rustfmt::skip] - let b = i32x4(-1, -2, -3, 4); + let c = i32x4::new(-6, -7, -8, -9); #[rustfmt::skip] - let c = i32x4(-6, -7, -8, -9); - let r = i64x2(-21, 10); + let r = i64x2::new(-21, 10); - assert_eq!(r, __msa_dpsub_s_d(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpsub_s_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpsub_u_h() { #[rustfmt::skip] - let a = i16x8(1, -2, 3, -4, -1, 2,-3, 4); + let a = i16x8::new(1, -2, 3, -4, -1, 2,-3, 4); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let c = u8x16( + let c = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = i16x8(-19, -62, -17, -64, -21, -58, -23, -56); + #[rustfmt::skip] + let r = i16x8::new(-19, -62, -17, -64, -21, -58, -23, -56); - assert_eq!(r, __msa_dpsub_u_h(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpsub_u_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpsub_u_w() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let c = u16x8( + let c = u16x8::new( 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = i32x4(-19, -62, -17, -64); + #[rustfmt::skip] + let r = i32x4::new(-19, -62, -17, -64); - assert_eq!(r, __msa_dpsub_u_w(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpsub_u_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_dpsub_u_d() { #[rustfmt::skip] - 
let a = i64x2(1, -2); + let a = i64x2::new(1, -2); + #[rustfmt::skip] + let b = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(1, 2, 3, 4); + let c = u32x4::new(6, 7, 8, 9); #[rustfmt::skip] - let c = u32x4(6, 7, 8, 9); - let r = i64x2(-19, -62); + let r = i64x2::new(-19, -62); - assert_eq!(r, __msa_dpsub_u_d(a, b, c)); + assert_eq!( + r, + ::mem::transmute(__msa_dpsub_u_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fadd_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, -4.4); + let a = f32x4::new(1.1, -2.2, 3.3, -4.4); + #[rustfmt::skip] + let b = f32x4::new(4.4, -3.3, 2.2, -1.1); #[rustfmt::skip] - let b = f32x4(4.4, -3.3, 2.2, -1.1); - let r = f32x4(5.5, -5.5, 5.5, -5.5); + let r = f32x4::new(5.5, -5.5, 5.5, -5.5); - assert_eq!(r, __msa_fadd_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fadd_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fadd_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2::new(4.4, -3.3); #[rustfmt::skip] - let b = f64x2(4.4, -3.3); - let r = f64x2(5.5, -5.5); + let r = f64x2::new(5.5, -5.5); - assert_eq!(r, __msa_fadd_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fadd_d(::mem::transmute(a), ::mem::transmute(b))) + ); } // Only observed behaviour should be SIGFPE signal @@ -11939,12 +12662,16 @@ mod tests { #[simd_test(enable = "msa")] unsafe fn test_msa_fcaf_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, -4.4); + let a = f32x4::new(1.1, -2.2, 3.3, -4.4); + #[rustfmt::skip] + let b = f32x4::new(0.0, -1.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(0.0, -1.2, 3.3, f32::NAN); - let r = i32x4(0, 0, 0, 0); + let r = i32x4::new(0, 0, 0, 0); - assert_eq!(r, __msa_fcaf_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcaf_w(::mem::transmute(a), ::mem::transmute(b))) + ); } // Only observed behaviour should be SIGFPE signal @@ -11952,320 +12679,426 @@ mod tests { #[simd_test(enable = "msa")] unsafe fn test_msa_fcaf_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2::new(-2.2, 1.1); #[rustfmt::skip] - let b = f64x2(-2.2, 1.1); - let r = i64x2(0, 0); + let r = i64x2::new(0, 0); - assert_eq!(r, __msa_fcaf_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcaf_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fceq_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4::new(-4.4, -2.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(-4.4, -2.2, 3.3, f32::NAN); - let r = i32x4(0, -1, -1, 0); + let r = i32x4::new(0, -1, -1, 0); - assert_eq!(r, __msa_fceq_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fceq_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fceq_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2::new(1.1, 1.1); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_fceq_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fceq_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fclass_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); - let r = i32x4(128,
8, 128, 2); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(128, 8, 128, 2); - assert_eq!(r, __msa_fclass_w(a)); + assert_eq!(r, ::mem::transmute(__msa_fclass_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fclass_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); - let r = i64x2(128, 8); + let a = f64x2::new(1.1, -2.2); + #[rustfmt::skip] + let r = i64x2::new(128, 8); - assert_eq!(r, __msa_fclass_d(a)); + assert_eq!(r, ::mem::transmute(__msa_fclass_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcle_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(-4.4, -1.2, 3.3, f32::NAN); - let r = i32x4(0, -1, -1, 0); + let b = f32x4::new(-4.4, -1.2, 3.3, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(0, -1, -1, 0); - assert_eq!(r, __msa_fcle_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcle_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcle_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(-1, -1); + let b = f64x2::new(1.1, 1.1); + #[rustfmt::skip] + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_fcle_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcle_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fclt_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(-4.4, -1.2, 3.3, f32::NAN); - let r = i32x4(0, -1, 0, 0); + let b = f32x4::new(-4.4, -1.2, 3.3, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(0, -1, 0, 0); - assert_eq!(r, __msa_fclt_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fclt_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fclt_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(0, -1); + let b = f64x2::new(1.1, 1.1); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fclt_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fclt_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcne_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(-4.4, -1.2, 3.3, f32::NAN); - let r = i32x4(-1, -1, 0, 0); + let b = f32x4::new(-4.4, -1.2, 3.3, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(-1, -1, 0, 0); - assert_eq!(r, __msa_fcne_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcne_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcne_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(0, -1); + let b = f64x2::new(1.1, 1.1); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fcne_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcne_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcor_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4::new(f32::NAN, -1.2, 3.3, 
f32::NAN); #[rustfmt::skip] - let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); - let r = i32x4(0, -1, -1, 0); + let r = i32x4::new(0, -1, -1, 0); - assert_eq!(r, __msa_fcor_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcor_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcor_d() { #[rustfmt::skip] - let a = f64x2(1.1, f64::NAN); + let a = f64x2::new(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2::new(1.1, 1.1); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_fcor_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcor_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcueq_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4::new(f32::NAN, -1.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); - let r = i32x4(-1, 0, -1, -1); + let r = i32x4::new(-1, 0, -1, -1); - assert_eq!(r, __msa_fcueq_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcueq_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcueq_d() { #[rustfmt::skip] - let a = f64x2(1.1, f64::NAN); + let a = f64x2::new(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2::new(1.1, 1.1); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(-1, -1); + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_fcueq_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcueq_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcule_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4::new(f32::NAN, -1.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); - let r = i32x4(-1, -1, -1, -1); + let r = i32x4::new(-1, -1, -1, -1); - assert_eq!(r, __msa_fcule_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcule_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcule_d() { #[rustfmt::skip] - let a = f64x2(1.1, f64::NAN); + let a = f64x2::new(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2::new(1.1, 1.1); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(-1, -1); + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_fcule_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcule_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcult_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); + #[rustfmt::skip] + let b = f32x4::new(f32::NAN, -1.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); - let r = i32x4(-1, -1, 0, -1); + let r = i32x4::new(-1, -1, 0, -1); - assert_eq!(r, __msa_fcult_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcult_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcult_d() { #[rustfmt::skip] - let a = f64x2(1.1, f64::NAN); + let a = f64x2::new(1.1, f64::NAN); + #[rustfmt::skip] + let b = f64x2::new(1.1, 1.1); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(0, -1); + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fcult_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcult_d(::mem::transmute(a), ::mem::transmute(b))) 
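// The expected lanes in test_msa_fclass_w/_d above are FCLASS bit masks. On
// the editor's reading of the MSA reference manual (the constant names below
// are illustrative, not from this patch), the three values that appear in
// those tests decode as:
const FCLASS_QUIET_NAN: i32 = 1 << 1;   // 2: quiet NaN (the f32::NAN lane)
const FCLASS_NEG_NORMAL: i32 = 1 << 3;  // 8: negative normal (-2.2)
const FCLASS_POS_NORMAL: i32 = 1 << 7;  // 128: positive normal (1.1, 3.3)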
+ ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcun_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); - let r = i32x4(-1, 0, 0, -1); + let b = f32x4::new(f32::NAN, -1.2, 3.3, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(-1, 0, 0, -1); - assert_eq!(r, __msa_fcun_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcun_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcun_d() { #[rustfmt::skip] - let a = f64x2(1.1, f64::NAN); + let a = f64x2::new(1.1, f64::NAN); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(0, -1); + let b = f64x2::new(1.1, 1.1); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fcun_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcun_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcune_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, f32::NAN); + let a = f32x4::new(1.1, -2.2, 3.3, f32::NAN); #[rustfmt::skip] - let b = f32x4(f32::NAN, -1.2, 3.3, f32::NAN); - let r = i32x4(-1, -1, 0, -1); + let b = f32x4::new(f32::NAN, -1.2, 3.3, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(-1, -1, 0, -1); - assert_eq!(r, __msa_fcune_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcune_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fcune_d() { #[rustfmt::skip] - let a = f64x2(1.1, f64::NAN); + let a = f64x2::new(1.1, f64::NAN); #[rustfmt::skip] - let b = f64x2(1.1, 1.1); - let r = i64x2(0, -1); + let b = f64x2::new(1.1, 1.1); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fcune_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fcune_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fdiv_w() { #[rustfmt::skip] - let a = f32x4(5.25, -20.2, 333.333, -425.0); + let a = f32x4::new(5.25, -20.2, 333.333, -425.0); #[rustfmt::skip] - let b = f32x4(4.0, -2.1, 11.11, 8.2); - let r = f32x4(1.3125, 9.619048, 30.002972, -51.82927); + let b = f32x4::new(4.0, -2.1, 11.11, 8.2); + #[rustfmt::skip] + let r = f32x4::new(1.3125, 9.619048, 30.002972, -51.82927); - assert_eq!(r, __msa_fdiv_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fdiv_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fdiv_d() { #[rustfmt::skip] - let a = f64x2(1111.11, -222222.2); + let a = f64x2::new(1111.11, -222222.2); #[rustfmt::skip] - let b = f64x2(-4.85, 3.33); - let r = f64x2(-229.09484536082473, -66733.3933933934); + let b = f64x2::new(-4.85, 3.33); + #[rustfmt::skip] + let r = f64x2::new(-229.09484536082473, -66733.3933933934); - assert_eq!(r, __msa_fdiv_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fdiv_d(::mem::transmute(a), ::mem::transmute(b))) + ); } /*// FIXME: 16-bit floats #[simd_test(enable = "msa")] unsafe fn test_msa_fexdo_h() { #[rustfmt::skip] - let a = f32x4(20.5, 2.3, 4.5, 5.4); + let a = f32x4::new(20.5, 2.3, 4.5, 5.4); #[rustfmt::skip] - let b = f32x4(1.1, 1.0, 1.0, 1.0); - let r = i16x8(1, 9, 30, 51, 1, 9, 30, 51); + let b = f32x4::new(1.1, 1.0, 1.0, 1.0); + let r = i16x8::new(1, 9, 30, 51, 1, 9, 30, 51); - assert_eq!(r, __msa_fexdo_h(a, b)); + assert_eq!(r, ::mem::transmute(__msa_fexdo_h(::mem::transmute(a), ::mem::transmute(b)))); }*/ #[simd_test(enable = "msa")] unsafe fn test_msa_fexdo_w() { #[rustfmt::skip] - let a 
= f64x2(2000005.5, 2.3); + let a = f64x2::new(2000005.5, 2.3); #[rustfmt::skip] - let b = f64x2(1235689784512.1, 2147483649998.5); - let r = f32x4( + let b = f64x2::new(1235689784512.1, 2147483649998.5); + #[rustfmt::skip] + let r = f32x4::new( 1235689800000.0, 2147483600000.0, 2000005.5, 2.3 ); - assert_eq!(r, __msa_fexdo_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fexdo_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fexp2_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, -4.4); + let a = f32x4::new(1.1, -2.2, 3.3, -4.4); + #[rustfmt::skip] + let b = i32x4::new(4, -3, 2, 1); #[rustfmt::skip] - let b = i32x4(4, -3, 2, 1); - let r = f32x4(17.6, -0.275, 13.2, -8.8); + let r = f32x4::new(17.6, -0.275, 13.2, -8.8); - assert_eq!(r, __msa_fexp2_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fexp2_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fexp2_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); + #[rustfmt::skip] + let b = i64x2::new(-4, 3); #[rustfmt::skip] - let b = i64x2(-4, 3); - let r = f64x2(0.06875, -17.6); + let r = f64x2::new(0.06875, -17.6); - assert_eq!(r, __msa_fexp2_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fexp2_d(::mem::transmute(a), ::mem::transmute(b))) + ); } // FIXME: 16-bit floats @@ -12274,19 +13107,19 @@ mod tests { // #[rustfmt::skip] // let a = f16x8(1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5); // #[rustfmt::skip] - // let r = f32x4(5.5, 6.5, 7.5, 8.5); + // let r = f32x4::new(5.5, 6.5, 7.5, 8.5); - // assert_eq!(r, __msa_fexupl_w(a)); + // assert_eq!(r, ::mem::transmute(__msa_fexupl_w(::mem::transmute(a)))); // } #[simd_test(enable = "msa")] unsafe fn test_msa_fexupl_d() { #[rustfmt::skip] - let a = f32x4(5.5, 6.5, 7.5, 8.5); + let a = f32x4::new(5.5, 6.5, 7.5, 8.5); #[rustfmt::skip] - let r = f64x2(7.5, 8.5); + let r = f64x2::new(7.5, 8.5); - assert_eq!(r, __msa_fexupl_d(a)); + assert_eq!(r, ::mem::transmute(__msa_fexupl_d(::mem::transmute(a)))); } // FIXME: 16-bit floats @@ -12295,1428 +13128,1767 @@ mod tests { // #[rustfmt::skip] // let a = f16x8(1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5); // #[rustfmt::skip] - // let r = f32x4(1.5, 2.5, 3.5, 4.5); + // let r = f32x4::new(1.5, 2.5, 3.5, 4.5); - // assert_eq!(r, __msa_fexupr_w(a)); + // assert_eq!(r, ::mem::transmute(__msa_fexupr_w(::mem::transmute(a)))); // } #[simd_test(enable = "msa")] unsafe fn test_msa_fexupr_d() { #[rustfmt::skip] - let a = f32x4(5.5, 6.5, 7.5, 8.5); + let a = f32x4::new(5.5, 6.5, 7.5, 8.5); #[rustfmt::skip] - let r = f64x2(5.5, 6.5); + let r = f64x2::new(5.5, 6.5); - assert_eq!(r, __msa_fexupr_d(a)); + assert_eq!(r, ::mem::transmute(__msa_fexupr_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffint_s_w() { #[rustfmt::skip] - let a = i32x4(-1, 2, -3, 4); + let a = i32x4::new(-1, 2, -3, 4); #[rustfmt::skip] - let r = f32x4(-1.0, 2.0, -3.0, 4.0); + let r = f32x4::new(-1.0, 2.0, -3.0, 4.0); - assert_eq!(r, __msa_ffint_s_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ffint_s_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffint_s_d() { #[rustfmt::skip] - let a = i64x2(-1, 2); + let a = i64x2::new(-1, 2); #[rustfmt::skip] - let r = f64x2(-1.0, 2.0); + let r = f64x2::new(-1.0, 2.0); - assert_eq!(r, __msa_ffint_s_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ffint_s_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffint_u_w() { #[rustfmt::skip] - let a = 
u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let r = f32x4(1.0, 2.0, 3.0, 4.0); + let r = f32x4::new(1.0, 2.0, 3.0, 4.0); - assert_eq!(r, __msa_ffint_u_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ffint_u_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffint_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); #[rustfmt::skip] - let r = f64x2(1.0, 2.0); + let r = f64x2::new(1.0, 2.0); - assert_eq!(r, __msa_ffint_u_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ffint_u_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffql_w() { #[rustfmt::skip] - let a = i16x8(11, 25, 33, 47, 11, 25, 33, 47); + let a = i16x8::new(11, 25, 33, 47, 11, 25, 33, 47); #[rustfmt::skip] - let r = f32x4( + let r = f32x4::new( 0.00033569336, 0.00076293945, 0.0010070801, 0.0014343262 ); - assert_eq!(r, __msa_ffql_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ffql_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffql_d() { #[rustfmt::skip] - let a = i32x4(1111, 2222, 3333, 4444); + let a = i32x4::new(1111, 2222, 3333, 4444); #[rustfmt::skip] - let r = f64x2( + let r = f64x2::new( 0.000001552049070596695, 0.0000020693987607955933 ); - assert_eq!(r, __msa_ffql_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ffql_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffqr_w() { #[rustfmt::skip] - let a = i16x8(12, 26, 34, 48, 11, 25, 33, 47); + let a = i16x8::new(12, 26, 34, 48, 11, 25, 33, 47); #[rustfmt::skip] - let r = f32x4( + let r = f32x4::new( 0.00036621094, 0.00079345703, 0.0010375977, 0.0014648438 ); - assert_eq!(r, __msa_ffqr_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ffqr_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ffqr_d() { #[rustfmt::skip] - let a = i32x4(1111, 2555, 3333, 475); + let a = i32x4::new(1111, 2555, 3333, 475); #[rustfmt::skip] - let r = f64x2( + let r = f64x2::new( 0.0000005173496901988983, 0.0000011897645890712738 ); - assert_eq!(r, __msa_ffqr_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ffqr_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fill_b() { #[rustfmt::skip] - let r = i8x16( + let r = i8x16::new( 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ); - assert_eq!(r, __msa_fill_b(2)); + assert_eq!(r, ::mem::transmute(__msa_fill_b(2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fill_h() { #[rustfmt::skip] - let r = i16x8(2, 2, 2, 2, 2, 2, 2, 2); + let r = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2); - assert_eq!(r, __msa_fill_h(2)); + assert_eq!(r, ::mem::transmute(__msa_fill_h(2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fill_w() { #[rustfmt::skip] - let r = i32x4(2, 2, 2, 2); + let r = i32x4::new(2, 2, 2, 2); - assert_eq!(r, __msa_fill_w(2)); + assert_eq!(r, ::mem::transmute(__msa_fill_w(2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fill_d() { #[rustfmt::skip] - let r = i64x2(2, 2); + let r = i64x2::new(2, 2); - assert_eq!(r, __msa_fill_d(2)); + assert_eq!(r, ::mem::transmute(__msa_fill_d(2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_flog2_w() { #[rustfmt::skip] - let a = f32x4(8.0, 16.0, 32.0, 64.0); + let a = f32x4::new(8.0, 16.0, 32.0, 64.0); #[rustfmt::skip] - let r = f32x4(3.0, 4.0, 5.0, 6.0); + let r = f32x4::new(3.0, 4.0, 5.0, 6.0); - assert_eq!(r, __msa_flog2_w(a)); + assert_eq!(r, ::mem::transmute(__msa_flog2_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_flog2_d() { #[rustfmt::skip] - let a = f64x2(8.0, 16.0); + 
let a = f64x2::new(8.0, 16.0); #[rustfmt::skip] - let r = f64x2(3.0, 4.0); + let r = f64x2::new(3.0, 4.0); - assert_eq!(r, __msa_flog2_d(a)); + assert_eq!(r, ::mem::transmute(__msa_flog2_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmadd_w() { #[rustfmt::skip] - let a = f32x4(1.0, 2.0, 3.0, 4.0); - let b = f32x4(5.0, 6.0, 7.0, 8.0); - let c = f32x4(9.0, 10.0, 11.0, 12.0); + let a = f32x4::new(1.0, 2.0, 3.0, 4.0); + #[rustfmt::skip] + let b = f32x4::new(5.0, 6.0, 7.0, 8.0); #[rustfmt::skip] - let r = f32x4(46.0, 62.0, 80.0, 100.0); + let c = f32x4::new(9.0, 10.0, 11.0, 12.0); + #[rustfmt::skip] + let r = f32x4::new(46.0, 62.0, 80.0, 100.0); - assert_eq!(r, __msa_fmadd_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_fmadd_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmadd_d() { #[rustfmt::skip] - let a = f64x2(1.0, 2.0); - let b = f64x2(3.0, 4.0); - let c = f64x2(5.0, 6.0); + let a = f64x2::new(1.0, 2.0); + #[rustfmt::skip] + let b = f64x2::new(3.0, 4.0); + #[rustfmt::skip] + let c = f64x2::new(5.0, 6.0); #[rustfmt::skip] - let r = f64x2(16.0, 26.0); + let r = f64x2::new(16.0, 26.0); - assert_eq!(r, __msa_fmadd_d(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_fmadd_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmax_w() { #[rustfmt::skip] - let a = f32x4(1.0, -6.0, 7.0, 8.0); - let b = f32x4(5.0, -2.0, 3.0, 4.0); + let a = f32x4::new(1.0, -6.0, 7.0, 8.0); + #[rustfmt::skip] + let b = f32x4::new(5.0, -2.0, 3.0, 4.0); #[rustfmt::skip] - let r = f32x4(5.0, -2.0, 7.0, 8.0); + let r = f32x4::new(5.0, -2.0, 7.0, 8.0); - assert_eq!(r, __msa_fmax_w(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmax_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmax_d() { #[rustfmt::skip] - let a = f64x2(1.0, 4.0); - let b = f64x2(3.0, 2.0); + let a = f64x2::new(1.0, 4.0); + #[rustfmt::skip] + let b = f64x2::new(3.0, 2.0); #[rustfmt::skip] - let r = f64x2(3.0, 4.0); + let r = f64x2::new(3.0, 4.0); - assert_eq!(r, __msa_fmax_d(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmax_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmax_a_w() { #[rustfmt::skip] - let a = f32x4(1.0, -6.0, -7.0, -8.0); - let b = f32x4(5.0, -2.0, 3.0, 4.0); + let a = f32x4::new(1.0, -6.0, -7.0, -8.0); + #[rustfmt::skip] + let b = f32x4::new(5.0, -2.0, 3.0, 4.0); #[rustfmt::skip] - let r = f32x4(5.0, -6.0, -7.0, -8.0); + let r = f32x4::new(5.0, -6.0, -7.0, -8.0); - assert_eq!(r, __msa_fmax_a_w(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmax_a_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmax_a_d() { #[rustfmt::skip] - let a = f64x2(1.0, -4.0); - let b = f64x2(3.0, 2.0); + let a = f64x2::new(1.0, -4.0); #[rustfmt::skip] - let r = f64x2(3.0, -4.0); + let b = f64x2::new(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2::new(3.0, -4.0); - assert_eq!(r, __msa_fmax_a_d(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmax_a_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmin_w() { #[rustfmt::skip] - let a = f32x4(1.0, -6.0, 7.0, 8.0); - let b = f32x4(5.0, -2.0, 3.0, 4.0); + let a = f32x4::new(1.0, -6.0, 7.0, 8.0); #[rustfmt::skip] - let r = f32x4(1.0, -6.0, 3.0, 4.0); + let b = f32x4::new(5.0, -2.0, 3.0, 4.0); + 
#[rustfmt::skip] + let r = f32x4::new(1.0, -6.0, 3.0, 4.0); - assert_eq!(r, __msa_fmin_w(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmin_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmin_d() { #[rustfmt::skip] - let a = f64x2(1.0, 4.0); - let b = f64x2(3.0, 2.0); + let a = f64x2::new(1.0, 4.0); #[rustfmt::skip] - let r = f64x2(1.0, 2.0); + let b = f64x2::new(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2::new(1.0, 2.0); - assert_eq!(r, __msa_fmin_d(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmin_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmin_a_w() { #[rustfmt::skip] - let a = f32x4(1.0, -6.0, -7.0, -8.0); - let b = f32x4(5.0, -2.0, 3.0, 4.0); + let a = f32x4::new(1.0, -6.0, -7.0, -8.0); #[rustfmt::skip] - let r = f32x4(1.0, -2.0, 3.0, 4.0); + let b = f32x4::new(5.0, -2.0, 3.0, 4.0); + #[rustfmt::skip] + let r = f32x4::new(1.0, -2.0, 3.0, 4.0); - assert_eq!(r, __msa_fmin_a_w(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmin_a_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmin_a_d() { #[rustfmt::skip] - let a = f64x2(1.0, -4.0); - let b = f64x2(3.0, 2.0); + let a = f64x2::new(1.0, -4.0); #[rustfmt::skip] - let r = f64x2(1.0, 2.0); + let b = f64x2::new(3.0, 2.0); + #[rustfmt::skip] + let r = f64x2::new(1.0, 2.0); - assert_eq!(r, __msa_fmin_a_d(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmin_a_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmsub_w() { #[rustfmt::skip] - let a = f32x4(1.0, 2.0, 3.0, 4.0); - let b = f32x4(5.0, 6.0, 7.0, 8.0); - let c = f32x4(9.0, 10.0, 11.0, 12.0); + let a = f32x4::new(1.0, 2.0, 3.0, 4.0); + #[rustfmt::skip] + let b = f32x4::new(5.0, 6.0, 7.0, 8.0); + #[rustfmt::skip] + let c = f32x4::new(9.0, 10.0, 11.0, 12.0); #[rustfmt::skip] - let r = f32x4(-44.0, -58.0, -74.0, -92.0); + let r = f32x4::new(-44.0, -58.0, -74.0, -92.0); - assert_eq!(r, __msa_fmsub_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_fmsub_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmsub_d() { #[rustfmt::skip] - let a = f64x2(1.0, 2.0); - let b = f64x2(3.0, 4.0); - let c = f64x2(5.0, 6.0); + let a = f64x2::new(1.0, 2.0); + #[rustfmt::skip] + let b = f64x2::new(3.0, 4.0); + #[rustfmt::skip] + let c = f64x2::new(5.0, 6.0); #[rustfmt::skip] - let r = f64x2(-14.0, -22.0); + let r = f64x2::new(-14.0, -22.0); - assert_eq!(r, __msa_fmsub_d(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_fmsub_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmul_w() { #[rustfmt::skip] - let a = f32x4(1.1, -2.2, 3.3, 4.4); + let a = f32x4::new(1.1, -2.2, 3.3, 4.4); + #[rustfmt::skip] + let b = f32x4::new(4.4, 3.3, 2.2, -1.1); #[rustfmt::skip] - let b = f32x4(4.4, 3.3, 2.2, -1.1); - let r = f32x4(4.84, -7.26, 7.26, -4.84); + let r = f32x4::new(4.84, -7.26, 7.26, -4.84); - assert_eq!(r, __msa_fmul_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmul_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fmul_d() { #[rustfmt::skip] - let a = f64x2(1.1, -2.2); + let a = f64x2::new(1.1, -2.2); + #[rustfmt::skip] + let b = f64x2::new(4.0, -3.3); #[rustfmt::skip] - let b = f64x2(4.0, -3.3); - let r = f64x2(4.4, 7.26); + let r = f64x2::new(4.4, 7.26); - 
assert_eq!(r, __msa_fmul_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fmul_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_frint_w() { #[rustfmt::skip] - let a = f32x4(2.6, -2.7, 1.3, -1.7);; + let a = f32x4::new(2.6, -2.7, 1.3, -1.7); #[rustfmt::skip] - let r = f32x4(3.0, -3.0, 1.0, -2.0); + let r = f32x4::new(3.0, -3.0, 1.0, -2.0); - assert_eq!(r, __msa_frint_w(a)); + assert_eq!(r, ::mem::transmute(__msa_frint_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_frint_d() { #[rustfmt::skip] - let a = f64x2(2.6, 1.3); + let a = f64x2::new(2.6, 1.3); #[rustfmt::skip] - let r = f64x2(3.0, 1.0); + let r = f64x2::new(3.0, 1.0); - assert_eq!(r, __msa_frint_d(a)); + assert_eq!(r, ::mem::transmute(__msa_frint_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_frcp_w() { #[rustfmt::skip] - let a = f32x4(2.6, -2.7, 1.3, -1.7);; + let a = f32x4::new(2.6, -2.7, 1.3, -1.7); #[rustfmt::skip] - let r = f32x4( + let r = f32x4::new( 0.3846154, -0.37037036, 0.7692308, -0.58823526 ); - assert_eq!(r, __msa_frcp_w(a)); + assert_eq!(r, ::mem::transmute(__msa_frcp_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_frcp_d() { #[rustfmt::skip] - let a = f64x2(2.6, 1.3); + let a = f64x2::new(2.6, 1.3); #[rustfmt::skip] - let r = f64x2(0.3846153846153846, 0.7692307692307692); + let r = f64x2::new(0.3846153846153846, 0.7692307692307692); - assert_eq!(r, __msa_frcp_d(a)); + assert_eq!(r, ::mem::transmute(__msa_frcp_d(::mem::transmute(a)))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_frsqrt_w() { #[rustfmt::skip] - let a = f32x4(2.6, 2.7, 1.3, 1.7);; + let a = f32x4::new(2.6, 2.7, 1.3, 1.7); #[rustfmt::skip] - let r = f32x4( + let r = f32x4::new( 0.6201737, 0.6085806, 0.87705797, 0.766965 ); - assert_eq!(r, __msa_frsqrt_w(a)); + assert_eq!(r, ::mem::transmute(__msa_frsqrt_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_frsqrt_d() { #[rustfmt::skip] - let a = f64x2(2.6, 1.3); + let a = f64x2::new(2.6, 1.3); #[rustfmt::skip] - let r = f64x2(0.6201736729460422, 0.8770580193070292); + let r = f64x2::new(0.6201736729460422, 0.8770580193070292); - assert_eq!(r, __msa_frsqrt_d(a)); + assert_eq!(r, ::mem::transmute(__msa_frsqrt_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsaf_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 5.5, 5.5, 5.5); + let a = f32x4::new(-5.5, 5.5, 5.5, 5.5); #[rustfmt::skip] - let b = f32x4(-5.5, 5.5, 5.5, 5.5); - let r = i32x4(0, 0, 0, 0); + let b = f32x4::new(-5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let r = i32x4::new(0, 0, 0, 0); - assert_eq!(r, __msa_fsaf_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsaf_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsaf_d() { #[rustfmt::skip] - let a = f64x2(-125.5, 5.5); + let a = f64x2::new(-125.5, 5.5); #[rustfmt::skip] - let b = f64x2(125.5, 3.3); - let r = i64x2(0, 0); + let b = f64x2::new(125.5, 3.3); + #[rustfmt::skip] + let r = i64x2::new(0, 0); - assert_eq!(r, __msa_fsaf_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsaf_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fseq_w() { #[rustfmt::skip] - let a = f32x4(-5.5, -3.3, f32::NAN, f32::NAN); + let a = f32x4::new(-5.5, -3.3, f32::NAN, f32::NAN); #[rustfmt::skip] - let b = f32x4(5.5, -3.3, f32::NAN, 1.1); - let r = i32x4(0, -1, 0, 0); + let b =
f32x4::new(5.5, -3.3, f32::NAN, 1.1); + #[rustfmt::skip] + let r = i32x4::new(0, -1, 0, 0); - assert_eq!(r, __msa_fseq_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fseq_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fseq_d() { #[rustfmt::skip] - let a = f64x2(-125.5, 5.5); + let a = f64x2::new(-125.5, 5.5); #[rustfmt::skip] - let b = f64x2(125.5, 5.5); - let r = i64x2(0, -1); + let b = f64x2::new(125.5, 5.5); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fseq_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fseq_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsle_w() { #[rustfmt::skip] - let a = f32x4(5.5, 5.5, 5.5, f32::NAN); + let a = f32x4::new(5.5, 5.5, 5.5, f32::NAN); + #[rustfmt::skip] + let b = f32x4::new(-5.5, 3.3, 5.5, f32::NAN); #[rustfmt::skip] - let b = f32x4(-5.5, 3.3, 5.5, f32::NAN); - let r = i32x4(0, 0, -1, 0); + let r = i32x4::new(0, 0, -1, 0); - assert_eq!(r, __msa_fsle_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsle_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsle_d() { #[rustfmt::skip] - let a = f64x2(-125.5, 5.5); + let a = f64x2::new(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2::new(125.5, 3.3); #[rustfmt::skip] - let b = f64x2(125.5, 3.3); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_fsle_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsle_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fslt_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 5.5, 5.5, 5.5); + let a = f32x4::new(-5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4::new(5.5, 3.3, 5.5, 1.1); #[rustfmt::skip] - let b = f32x4(5.5, 3.3, 5.5, 1.1); - let r = i32x4(-1, 0, 0, 0); + let r = i32x4::new(-1, 0, 0, 0); - assert_eq!(r, __msa_fslt_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fslt_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fslt_d() { #[rustfmt::skip] - let a = f64x2(-125.5, 5.5); + let a = f64x2::new(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2::new(125.5, 3.3); #[rustfmt::skip] - let b = f64x2(125.5, 3.3); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_fslt_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fslt_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsne_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 5.5, 5.5, 5.5); + let a = f32x4::new(-5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4::new(5.5, 3.3, 5.5, 1.1); #[rustfmt::skip] - let b = f32x4(5.5, 3.3, 5.5, 1.1); - let r = i32x4(-1, -1, 0, -1); + let r = i32x4::new(-1, -1, 0, -1); - assert_eq!(r, __msa_fsne_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsne_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsne_d() { #[rustfmt::skip] - let a = f64x2(-125.5, 5.5); + let a = f64x2::new(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2::new(125.5, 5.5); #[rustfmt::skip] - let b = f64x2(125.5, 5.5); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_fsne_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsne_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsor_w() { #[rustfmt::skip] - let a = f32x4(-5.5, f32::NAN, 5.5, 5.5); + let a = f32x4::new(-5.5, 
f32::NAN, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4::new(5.5, 3.3, 5.5, 1.1); #[rustfmt::skip] - let b = f32x4(5.5, 3.3, 5.5, 1.1); - let r = i32x4(-1, 0, -1, -1); + let r = i32x4::new(-1, 0, -1, -1); - assert_eq!(r, __msa_fsor_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsor_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsor_d() { #[rustfmt::skip] - let a = f64x2(-125.5, 5.5); + let a = f64x2::new(-125.5, 5.5); + #[rustfmt::skip] + let b = f64x2::new(125.5, f64::NAN); #[rustfmt::skip] - let b = f64x2(125.5, f64::NAN); - let r = i64x2(-1, 0); + let r = i64x2::new(-1, 0); - assert_eq!(r, __msa_fsor_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsor_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsqrt_w() { #[rustfmt::skip] - let a = f32x4(9.0, 81.0, 1089.0, 10000.0); - let r = f32x4(3.0, 9.0, 33.0, 100.0); + let a = f32x4::new(9.0, 81.0, 1089.0, 10000.0); + #[rustfmt::skip] + let r = f32x4::new(3.0, 9.0, 33.0, 100.0); - assert_eq!(r, __msa_fsqrt_w(a)); + assert_eq!(r, ::mem::transmute(__msa_fsqrt_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsqrt_d() { #[rustfmt::skip] - let a = f64x2(81.0, 10000.0); - let r = f64x2(9.0, 100.0); + let a = f64x2::new(81.0, 10000.0); + #[rustfmt::skip] + let r = f64x2::new(9.0, 100.0); - assert_eq!(r, __msa_fsqrt_d(a)); + assert_eq!(r, ::mem::transmute(__msa_fsqrt_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsub_w() { #[rustfmt::skip] - let a = f32x4(5.5, 6.5, 7.5, 8.5); + let a = f32x4::new(5.5, 6.5, 7.5, 8.5); + #[rustfmt::skip] + let b = f32x4::new(1.25, 1.75, 2.25, 2.75); #[rustfmt::skip] - let b = f32x4(1.25, 1.75, 2.25, 2.75); - let r = f32x4(4.25, 4.75, 5.25, 5.75); + let r = f32x4::new(4.25, 4.75, 5.25, 5.75); - assert_eq!(r, __msa_fsub_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsub_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsub_d() { #[rustfmt::skip] - let a = f64x2(555.5, 55.5); + let a = f64x2::new(555.5, 55.5); + #[rustfmt::skip] + let b = f64x2::new(4.25, 3.25); #[rustfmt::skip] - let b = f64x2(4.25, 3.25); - let r = f64x2(551.25, 52.25); + let r = f64x2::new(551.25, 52.25); - assert_eq!(r, __msa_fsub_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsub_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsueq_w() { #[rustfmt::skip] - let a = f32x4(5.5, f32::NAN, 5.5, 5.5); + let a = f32x4::new(5.5, f32::NAN, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4::new(5.5, 5.5, -5.5, 5.5); #[rustfmt::skip] - let b = f32x4(5.5, 5.5, -5.5, 5.5); - let r = i32x4(-1, -1, 0, -1); + let r = i32x4::new(-1, -1, 0, -1); - assert_eq!(r, __msa_fsueq_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsueq_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsueq_d() { #[rustfmt::skip] - let a = f64x2(-5.5, 5.5); + let a = f64x2::new(-5.5, 5.5); + #[rustfmt::skip] + let b = f64x2::new(5.5, f64::NAN); #[rustfmt::skip] - let b = f64x2(5.5, f64::NAN); - let r = i64x2(0, -1); + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fsueq_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsueq_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsule_w() { #[rustfmt::skip] - let a = f32x4(5.7, 5.8, 5.9, f32::NAN); + let a = f32x4::new(5.7, 5.8, 5.9, f32::NAN); + 
#[rustfmt::skip] + let b = f32x4::new(5.6, 5.9, 5.9, f32::NAN); #[rustfmt::skip] - let b = f32x4(5.6, 5.9, 5.9, f32::NAN); - let r = i32x4(0, -1, -1, -1); + let r = i32x4::new(0, -1, -1, -1); - assert_eq!(r, __msa_fsule_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsule_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsule_d() { #[rustfmt::skip] - let a = f64x2(5.5, 5.5); + let a = f64x2::new(5.5, 5.5); + #[rustfmt::skip] + let b = f64x2::new(5.5, 5.5); #[rustfmt::skip] - let b = f64x2(5.5, 5.5); - let r = i64x2(-1, -1); + let r = i64x2::new(-1, -1); - assert_eq!(r, __msa_fsule_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsule_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsult_w() { #[rustfmt::skip] - let a = f32x4(5.5, 5.5, 5.5, 5.5); + let a = f32x4::new(5.5, 5.5, 5.5, 5.5); + #[rustfmt::skip] + let b = f32x4::new(5.6, f32::NAN, 2.2, 1.1); #[rustfmt::skip] - let b = f32x4(5.6, f32::NAN, 2.2, 1.1); - let r = i32x4(-1, -1, 0, 0); + let r = i32x4::new(-1, -1, 0, 0); - assert_eq!(r, __msa_fsult_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsult_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsult_d() { #[rustfmt::skip] - let a = f64x2(5.5, f64::NAN); + let a = f64x2::new(5.5, f64::NAN); #[rustfmt::skip] - let b = f64x2(4.4, 3.3); - let r = i64x2(0, -1); + let b = f64x2::new(4.4, 3.3); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fsult_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsult_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsun_w() { #[rustfmt::skip] - let a = f32x4(5.5, 5.5, f32::NAN, 5.5); + let a = f32x4::new(5.5, 5.5, f32::NAN, 5.5); #[rustfmt::skip] - let b = f32x4(4.4, 3.3, 2.2, f32::NAN); - let r = i32x4(0, 0, -1, -1); + let b = f32x4::new(4.4, 3.3, 2.2, f32::NAN); + #[rustfmt::skip] + let r = i32x4::new(0, 0, -1, -1); - assert_eq!(r, __msa_fsun_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsun_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsun_d() { #[rustfmt::skip] - let a = f64x2(5.5, f64::NAN); + let a = f64x2::new(5.5, f64::NAN); #[rustfmt::skip] - let b = f64x2(4.4, 3.3); - let r = i64x2(0, -1); + let b = f64x2::new(4.4, 3.3); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fsun_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsun_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsune_w() { #[rustfmt::skip] - let a = f32x4(5.5, 5.5, f32::NAN, 5.5); + let a = f32x4::new(5.5, 5.5, f32::NAN, 5.5); #[rustfmt::skip] - let b = f32x4(4.4, 3.3, 2.2, 5.5); - let r = i32x4(-1, -1, -1, 0); + let b = f32x4::new(4.4, 3.3, 2.2, 5.5); + #[rustfmt::skip] + let r = i32x4::new(-1, -1, -1, 0); - assert_eq!(r, __msa_fsune_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsune_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_fsune_d() { #[rustfmt::skip] - let a = f64x2(5.5, f64::NAN); + let a = f64x2::new(5.5, f64::NAN); #[rustfmt::skip] - let b = f64x2(5.5, 3.3); - let r = i64x2(0, -1); + let b = f64x2::new(5.5, 3.3); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_fsune_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_fsune_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = 
"msa")] unsafe fn test_msa_ftint_s_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); - let r = i32x4(-6, 76, -1001, 1219); + let a = f32x4::new(-5.5, 75.6, -1000.7, 1219.3); + #[rustfmt::skip] + let r = i32x4::new(-6, 76, -1001, 1219); - assert_eq!(r, __msa_ftint_s_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ftint_s_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftint_s_d() { #[rustfmt::skip] - let a = f64x2(-5.5, 25656.4); - let r = i64x2(-6, 25656); + let a = f64x2::new(-5.5, 25656.4); + #[rustfmt::skip] + let r = i64x2::new(-6, 25656); - assert_eq!(r, __msa_ftint_s_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ftint_s_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftint_u_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); - let r = u32x4(0, 76, 0, 1219); + let a = f32x4::new(-5.5, 75.6, -1000.7, 1219.3); + #[rustfmt::skip] + let r = u32x4::new(0, 76, 0, 1219); - assert_eq!(r, __msa_ftint_u_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ftint_u_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftint_u_d() { #[rustfmt::skip] - let a = f64x2(5.5, -25656.4); - let r = u64x2(6, 0); + let a = f64x2::new(5.5, -25656.4); + #[rustfmt::skip] + let r = u64x2::new(6, 0); - assert_eq!(r, __msa_ftint_u_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ftint_u_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftq_h() { #[rustfmt::skip] - let a = f32x4(0.00001, 0.0002, 0.00001, -0.0002); + let a = f32x4::new(0.00001, 0.0002, 0.00001, -0.0002); + #[rustfmt::skip] + let b = f32x4::new(0.0001, -0.002, 0.0001, 0.002); #[rustfmt::skip] - let b = f32x4(0.0001, -0.002, 0.0001, 0.002); - let r = i16x8(3, -66, 3, 66, 0, 7, 0, -7); + let r = i16x8::new(3, -66, 3, 66, 0, 7, 0, -7); - assert_eq!(r, __msa_ftq_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ftq_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftq_w() { #[rustfmt::skip] - let a = f64x2(0.00001, -0.0002); + let a = f64x2::new(0.00001, -0.0002); + #[rustfmt::skip] + let b = f64x2::new(0.00000045, 0.000015); #[rustfmt::skip] - let b = f64x2(0.00000045, 0.000015); - let r = i32x4(966, 32212, 21475, -429497); + let r = i32x4::new(966, 32212, 21475, -429497); - assert_eq!(r, __msa_ftq_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ftq_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftrunc_s_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); - let r = i32x4(-5, 75, -1000, 1219); + let a = f32x4::new(-5.5, 75.6, -1000.7, 1219.3); + #[rustfmt::skip] + let r = i32x4::new(-5, 75, -1000, 1219); - assert_eq!(r, __msa_ftrunc_s_w(a)); + assert_eq!(r, ::mem::transmute(__msa_ftrunc_s_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftrunc_s_d() { #[rustfmt::skip] - let a = f64x2(-5.5, 25656.4); - let r = i64x2(-5, 25656); + let a = f64x2::new(-5.5, 25656.4); + #[rustfmt::skip] + let r = i64x2::new(-5, 25656); - assert_eq!(r, __msa_ftrunc_s_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ftrunc_s_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftrunc_u_w() { #[rustfmt::skip] - let a = f32x4(-5.5, 75.6, -1000.7, 1219.3); - let r = u32x4(0, 75, 0, 1219); + let a = f32x4::new(-5.5, 75.6, -1000.7, 1219.3); + #[rustfmt::skip] + let r = u32x4::new(0, 75, 0, 1219); - assert_eq!(r, __msa_ftrunc_u_w(a)); + assert_eq!(r, 
::mem::transmute(__msa_ftrunc_u_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_ftrunc_u_d() { #[rustfmt::skip] - let a = f64x2(5.5, -25656.4); - let r = u64x2(5, 0); + let a = f64x2::new(5.5, -25656.4); + #[rustfmt::skip] + let r = u64x2::new(5, 0); - assert_eq!(r, __msa_ftrunc_u_d(a)); + assert_eq!(r, ::mem::transmute(__msa_ftrunc_u_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_hadd_s_h() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i16x8(6, 6, 2, -2, 6, 6, 2, -2); + #[rustfmt::skip] + let r = i16x8::new(6, 6, 2, -2, 6, 6, 2, -2); - assert_eq!(r, __msa_hadd_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hadd_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hadd_s_w() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i32x4(6, 6, 2, -2); + #[rustfmt::skip] + let r = i32x4::new(6, 6, 2, -2); - assert_eq!(r, __msa_hadd_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hadd_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hadd_s_d() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4::new(4, 3, 2, 1); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i64x2(2, -2); + let r = i64x2::new(2, -2); - assert_eq!(r, __msa_hadd_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hadd_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hadd_u_h() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = u16x8(6, 6, 6, 6, 6, 6, 6, 6); + #[rustfmt::skip] + let r = u16x8::new(6, 6, 6, 6, 6, 6, 6, 6); - assert_eq!(r, __msa_hadd_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hadd_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hadd_u_w() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = u32x4(6, 6, 6, 6); + #[rustfmt::skip] + let r = u32x4::new(6, 6, 6, 6); - assert_eq!(r, __msa_hadd_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hadd_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hadd_u_d() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(4, 3, 2, 1); - let r = u64x2(6, 6); + let b = u32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = u64x2::new(6, 6); - assert_eq!(r, __msa_hadd_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hadd_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hsub_s_h() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i16x8(-2, 2, -6, -6, -2, 2, -6, -6); + 
#[rustfmt::skip] + let r = i16x8::new(-2, 2, -6, -6, -2, 2, -6, -6); - assert_eq!(r, __msa_hsub_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hsub_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hsub_s_w() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i32x4(-2, 2, -6, -6); + #[rustfmt::skip] + let r = i32x4::new(-2, 2, -6, -6); - assert_eq!(r, __msa_hsub_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hsub_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hsub_s_d() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i64x2(-6, -6); + let b = i32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = i64x2::new(-6, -6); - assert_eq!(r, __msa_hsub_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hsub_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hsub_u_h() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i16x8(-2, 2, -2, 2, -2, 2, -2, 2); + #[rustfmt::skip] + let r = i16x8::new(-2, 2, -2, 2, -2, 2, -2, 2); - assert_eq!(r, __msa_hsub_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hsub_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hsub_u_w() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u16x8( + let b = u16x8::new( 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i32x4(-2, 2, -2, 2); + #[rustfmt::skip] + let r = i32x4::new(-2, 2, -2, 2); - assert_eq!(r, __msa_hsub_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hsub_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_hsub_u_d() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let b = u32x4::new(4, 3, 2, 1); #[rustfmt::skip] - let b = u32x4(4, 3, 2, 1); - let r = i64x2(-2, 2); + let r = i64x2::new(-2, 2); - assert_eq!(r, __msa_hsub_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_hsub_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvev_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3 ); - assert_eq!(r, __msa_ilvev_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvev_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvev_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i16x8(4, 1, 2, 3, 4, 1, 2, 3); + #[rustfmt::skip] + let r = i16x8::new(4, 1, 2, 3, 4, 1, 2, 3); - assert_eq!(r, __msa_ilvev_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvev_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe 
fn test_msa_ilvev_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4::new(4, 3, 2, 1); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(4, 1, 2, 3); + let r = i32x4::new(4, 1, 2, 3); - assert_eq!(r, __msa_ilvev_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvev_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvev_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let b = i64x2::new(4, 3); #[rustfmt::skip] - let b = i64x2(4, 3); - let r = i64x2(4, 1); + let r = i64x2::new(4, 1); - assert_eq!(r, __msa_ilvev_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvev_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvl_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 8, 9, 7, 10, 6, 11, 5, 12, 4, 13, 3, 14, 2, 15, 1, 16 ); - assert_eq!(r, __msa_ilvl_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvl_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvl_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 5, 6, 7, 8 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i16x8(4, 5, 3, 6, 2, 7, 1, 8); + #[rustfmt::skip] + let r = i16x8::new(4, 5, 3, 6, 2, 7, 1, 8); - assert_eq!(r, __msa_ilvl_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvl_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvl_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4::new(4, 3, 2, 1); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(2, 3, 1, 4); + let r = i32x4::new(2, 3, 1, 4); - assert_eq!(r, __msa_ilvl_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvl_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvl_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(2, 1); - let r = i64x2(1, 2); + let b = i64x2::new(2, 1); + #[rustfmt::skip] + let r = i64x2::new(1, 2); - assert_eq!(r, __msa_ilvl_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvl_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvod_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 15, 2, 13, 4, 11, 6, 9, 8, 7, 10, 5, 12, 3, 14, 1, 16 ); - assert_eq!(r, __msa_ilvod_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvod_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvod_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 5, 6, 7, 8 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i16x8(7, 2, 5, 4, 3, 6, 1, 8); + #[rustfmt::skip] + let r = i16x8::new(7, 2, 5, 4, 3, 6, 1, 8); - assert_eq!(r, 
__msa_ilvod_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvod_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvod_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(3, 2, 1, 4); + let b = i32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = i32x4::new(3, 2, 1, 4); - assert_eq!(r, __msa_ilvod_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvod_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvod_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(2, 1); - let r = i64x2(1, 2); + let b = i64x2::new(2, 1); + #[rustfmt::skip] + let r = i64x2::new(1, 2); - assert_eq!(r, __msa_ilvod_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvod_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvr_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 16, 1, 15, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8 ); - assert_eq!(r, __msa_ilvr_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvr_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvr_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 5, 6, 7, 8, ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 8, 7, 6, 5, 4, 3, 2, 1, ); - let r = i16x8(8, 1, 7, 2, 6, 3, 5, 4); + #[rustfmt::skip] + let r = i16x8::new(8, 1, 7, 2, 6, 3, 5, 4); - assert_eq!(r, __msa_ilvr_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvr_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvr_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let b = i32x4::new(4, 3, 2, 1); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(4, 1, 3, 2); + let r = i32x4::new(4, 1, 3, 2); - assert_eq!(r, __msa_ilvr_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvr_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ilvr_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let b = i64x2::new(2, 1); #[rustfmt::skip] - let b = i64x2(2, 1); - let r = i64x2(2, 1); + let r = i64x2::new(2, 1); - assert_eq!(r, __msa_ilvr_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_ilvr_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_insert_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -100, 127, 4, 127, -100, 127, 4, 127, -100, 127, 4, 127, -100, 127, 4, 127 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -100, 127, 4, 127, -100, 127, 4, 127, -100, 127, 4, 127, 5, 127, 4, 127 ); - assert_eq!(r, __msa_insert_b(a, 12, 5)); + assert_eq!( + r, + ::mem::transmute(__msa_insert_b(::mem::transmute(a), 12, 5)) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_insert_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 32767, 3276, 100, 11, 32767, 3276, 100, 11 ); - let r = i16x8( + #[rustfmt::skip] + let r 
= i16x8::new( 32767, 3276, 100, 11, 5, 3276, 100, 11 ); - assert_eq!(r, __msa_insert_h(a, 4, 5)); + assert_eq!( + r, + ::mem::transmute(__msa_insert_h(::mem::transmute(a), 4, 5)) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_insert_w() { #[rustfmt::skip] - let a = i32x4(100, 2147483647, 5, -2147483647); - let r = i32x4(100, 7, 5, -2147483647); + let a = i32x4::new(100, 2147483647, 5, -2147483647); + #[rustfmt::skip] + let r = i32x4::new(100, 7, 5, -2147483647); - assert_eq!(r, __msa_insert_w(a, 1, 7)); + assert_eq!( + r, + ::mem::transmute(__msa_insert_w(::mem::transmute(a), 1, 7)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_insert_d() { #[rustfmt::skip] - let a = i64x2(3, i64::max_value()); + let a = i64x2::new(3, i64::max_value()); #[rustfmt::skip] - let r = i64x2(3, 100); + let r = i64x2::new(3, 100); - assert_eq!(r, __msa_insert_d(a, 1, 100)); + assert_eq!( + r, + ::mem::transmute(__msa_insert_d(::mem::transmute(a), 1, 100)) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_insve_b() { #[rustfmt::skip] - let a = i8x16( - -100, 127, 4, 127, - -100, 127, 4, 127, - -100, 127, 4, 127, - -100, 127, 4, 127 + let a = i8x16::new( + -100, i8::max_value(), 4, i8::max_value(), + -100, i8::max_value(), 4, i8::max_value(), + -100, i8::max_value(), 4, i8::max_value(), + -100, i8::max_value(), 4, i8::max_value() ); - let b = i8x16( + #[rustfmt::skip] + let b = i8x16::new( 5, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -100, 127, 4, 127, -100, 127, 4, 127, -100, 127, 4, 127, 5, 127, 4, 127 ); - assert_eq!(r, __msa_insve_b(a, 12, b)); + assert_eq!( + r, + ::mem::transmute(__msa_insve_b(::mem::transmute(a), 12, ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_insve_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 3276, 100, 11, i16::max_value(), 3276, 100, 11 ); - let b = i16x8( + #[rustfmt::skip] + let b = i16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = i16x8( + #[rustfmt::skip] + let r = i16x8::new( 32767, 3276, 100, 11, 1, 3276, 100, 11 ); - assert_eq!(r, __msa_insve_h(a, 4, b)); + assert_eq!( + r, + ::mem::transmute(__msa_insve_h(::mem::transmute(a), 4, ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_insve_w() { #[rustfmt::skip] - let a = i32x4(100, 2147483647, 5, -2147483647); - let b = i32x4(1, 2, 3, 4); - let r = i32x4(100, 2147483647, 5, 1); + let a = i32x4::new(100, 2147483647, 5, -2147483647); + #[rustfmt::skip] + let b = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = i32x4::new(100, 2147483647, 5, 1); - assert_eq!(r, __msa_insve_w(a, 3, b)); + assert_eq!( + r, + ::mem::transmute(__msa_insve_w(::mem::transmute(a), 3, ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_insve_d() { #[rustfmt::skip] - let a = i64x2(3, i64::max_value()); - let b = i64x2(1, 2); + let a = i64x2::new(3, i64::max_value()); + #[rustfmt::skip] + let b = i64x2::new(1, 2); #[rustfmt::skip] - let r = i64x2(3, 1); + let r = i64x2::new(3, 1); - assert_eq!(r, __msa_insve_d(a, 1, b)); + assert_eq!( + r, + ::mem::transmute(__msa_insve_d(::mem::transmute(a), 1, ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ld_b() { - + #[rustfmt::skip] let mut a : [i8; 32] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 - ]; + ]; let p = &mut a[4] as *mut _ as *mut i8; - - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 ); - assert_eq!(r, __msa_ld_b(p, 9)); + assert_eq!(r, ::mem::transmute(__msa_ld_b(p, 9))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ld_h() { - + #[rustfmt::skip] let mut a : [i16; 16] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ]; let p = &mut a[4] as *mut _ as *mut i8; + #[rustfmt::skip] + let r = i16x8::new(3, 4, 5, 6, 7, 8, 9, 10); - let r = i16x8(3, 4, 5, 6, 7, 8, 9, 10); - - assert_eq!(r, __msa_ld_h(p, -2)); + assert_eq!(r, ::mem::transmute(__msa_ld_h(p, -2))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ld_w() { - + #[rustfmt::skip] let mut a : [i32; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; let p = &mut a[3] as *mut _ as *mut i8; + #[rustfmt::skip] + let r = i32x4::new(2, 3, 4, 5); - let r = i32x4(2, 3, 4, 5); - - assert_eq!(r, __msa_ld_w(p, -4)); + assert_eq!(r, ::mem::transmute(__msa_ld_w(p, -4))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ld_d() { - + #[rustfmt::skip] let mut a : [i64; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; let p = &mut a[4] as *mut _ as *mut i8; + #[rustfmt::skip] + let r = i64x2::new(0, 1); - let r = i64x2(0, 1); - - assert_eq!(r, __msa_ld_d(p, -32)); + assert_eq!(r, ::mem::transmute(__msa_ld_d(p, -32))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ldi_b() { - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20 ); - assert_eq!(r, __msa_ldi_b(-20)); + assert_eq!(r, ::mem::transmute(__msa_ldi_b(-20))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ldi_h() { - let r = i16x8( + #[rustfmt::skip] + let r = i16x8::new( 255, 255, 255, 255, 255, 255, 255, 255 ); - assert_eq!(r, __msa_ldi_h(255)); + assert_eq!(r, ::mem::transmute(__msa_ldi_h(255))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_ldi_w() { - let r = i32x4(-509, -509, -509, -509); + #[rustfmt::skip] + let r = i32x4::new(-509, -509, -509, -509); - assert_eq!(r, __msa_ldi_w(-509)); + assert_eq!(r, ::mem::transmute(__msa_ldi_w(-509))); } // FIXME: https://reviews.llvm.org/D59884 @@ -13724,375 +14896,483 @@ mod tests { // Test passes if 4294967185 is used instead of -111 in vector 'r' // #[simd_test(enable = "msa")] // unsafe fn test_msa_ldi_d() { - // let r = i64x2(-111, -111); + // let r = i64x2::new(-111, -111); - // assert_eq!(r, __msa_ldi_d(-111)); + // assert_eq!(r, ::mem::transmute(__msa_ldi_d(-111))); // } #[simd_test(enable = "msa")] unsafe fn test_msa_madd_q_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 1024, i16::min_value(), -1024, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 ); #[rustfmt::skip] - let c = i16x8( + let c = i16x8::new( i16::max_value(), i16::max_value(), 1, -1, 33, 66, 99, 132 ); #[rustfmt::skip] - let r = i16x8(32767, 2047, -32768, -1025, 2, 4, 6, 8); + let r = i16x8::new(32767, 2047, -32768, -1025, 2, 4, 6, 8); - assert_eq!(r, __msa_madd_q_h(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_madd_q_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_madd_q_w() { #[rustfmt::skip] - let
a = i32x4(i32::max_value(), i32::min_value(), 1, 2); + let a = i32x4::new(i32::max_value(), i32::min_value(), 1, 2); #[rustfmt::skip] - let b = i32x4(102401, 102401, 102401, 102401); + let b = i32x4::new(102401, 102401, 102401, 102401); #[rustfmt::skip] - let c = i32x4(10240, 20480, 30720, 40960); + let c = i32x4::new(10240, 20480, 30720, 40960); #[rustfmt::skip] - let r = i32x4(2147483647, -2147483648, 2, 3); + let r = i32x4::new(2147483647, -2147483648, 2, 3); - assert_eq!(r, __msa_madd_q_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_madd_q_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maddr_q_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 32767, 1024, -32768, -1024, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 ); #[rustfmt::skip] - let c = i16x8( + let c = i16x8::new( 32767, 32767, 32767, 32767, 33, 66, 99, 132 ); #[rustfmt::skip] - let r = i16x8(32767, 2048, -31744, 0, 2, 4, 6, 8); + let r = i16x8::new(32767, 2048, -31744, 0, 2, 4, 6, 8); - assert_eq!(r, __msa_maddr_q_h(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_maddr_q_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maddr_q_w() { #[rustfmt::skip] - let a = i32x4(i32::max_value(), i32::min_value(), 1, 2); + let a = i32x4::new(i32::max_value(), i32::min_value(), 1, 2); #[rustfmt::skip] - let b = i32x4(102401, 102401, 102401, 102401); + let b = i32x4::new(102401, 102401, 102401, 102401); #[rustfmt::skip] - let c = i32x4(10240, 20480, 30720, 40960); + let c = i32x4::new(10240, 20480, 30720, 40960); #[rustfmt::skip] - let r = i32x4(2147483647, -2147483647, 2, 4); + let r = i32x4::new(2147483647, -2147483647, 2, 4); - assert_eq!(r, __msa_maddr_q_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_maddr_q_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maddv_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8 ); #[rustfmt::skip] - let c = i8x16( + let c = i8x16::new( 9, 10, 11, 12, 9, 10, 11, 12, 9, 10, 11, 12, 9, 10, 11, 12 ); #[rustfmt::skip] - let r = i8x16( + let r = i8x16::new( 46, 62, 80, 100, 46, 62, 80, 100, 46, 62, 80, 100, 46, 62, 80, 100 ); - assert_eq!(r, __msa_maddv_b(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_maddv_b( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maddv_h() { #[rustfmt::skip] - let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = i16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = i16x8(5, 6, 7, 8, 5, 6, 7, 8); + let b = i16x8::new(5, 6, 7, 8, 5, 6, 7, 8); #[rustfmt::skip] - let c = i16x8(9, 10, 11, 12, 9, 10, 11, 12); + let c = i16x8::new(9, 10, 11, 12, 9, 10, 11, 12); #[rustfmt::skip] - let r = i16x8(46, 62, 80, 100, 46, 62, 80, 100); + let r = i16x8::new(46, 62, 80, 100, 46, 62, 80, 100); - assert_eq!(r, __msa_maddv_h(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_maddv_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maddv_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 1, 2); + let a = i32x4::new(1, 2, 1, 2); 
#[rustfmt::skip] - let b = i32x4(3, 4, 3, 4); + let b = i32x4::new(3, 4, 3, 4); #[rustfmt::skip] - let c = i32x4(5, 6, 5, 6); + let c = i32x4::new(5, 6, 5, 6); #[rustfmt::skip] - let r = i32x4(16, 26, 16, 26); + let r = i32x4::new(16, 26, 16, 26); - assert_eq!(r, __msa_maddv_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_maddv_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maddv_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(3, 4); + let b = i64x2::new(3, 4); #[rustfmt::skip] - let c = i64x2(5, 6); + let c = i64x2::new(5, 6); #[rustfmt::skip] - let r = i64x2(16, 26); + let r = i64x2::new(16, 26); - assert_eq!(r, __msa_maddv_d(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_maddv_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } - #[simd_test(enable = "msa")] unsafe fn test_msa_max_a_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, -7, -8, -9, 6, 7, 8, 9, -6, -7, -8, -9, 6, 7, 8, 9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -6, -7, -8, -9, 6, 7, 8, 9, -6, -7, -8, -9, 6, 7, 8, 9 ); - assert_eq!(r, __msa_max_a_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_a_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_a_h() { #[rustfmt::skip] - let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let a = i16x8::new(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8::new(-6, 7, -8, 9, -6, 7, -8, 9); #[rustfmt::skip] - let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); - let r = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + let r = i16x8::new(-6, 7, -8, 9, -6, 7, -8, 9); - assert_eq!(r, __msa_max_a_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_a_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_a_w() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4::new(6, 7, 8, 9); #[rustfmt::skip] - let b = i32x4(6, 7, 8, 9); - let r = i32x4(6, 7, 8, 9); + let r = i32x4::new(6, 7, 8, 9); - assert_eq!(r, __msa_max_a_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_a_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_a_d() { #[rustfmt::skip] - let a = i64x2(-1, 2); + let a = i64x2::new(-1, 2); + #[rustfmt::skip] + let b = i64x2::new(6, -7); #[rustfmt::skip] - let b = i64x2(6, -7); - let r = i64x2(6, -7); + let r = i64x2::new(6, -7); - assert_eq!(r, __msa_max_a_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_a_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, -7, -8, -9, 6, 7, 8, 9, -6, -7, -8, -9, 6, 7, 8, 9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 1, 2, 3, 4, 6, 7, 8, 9, 1, 2, 3, 4, 6, 7, 8, 9 ); - assert_eq!(r, __msa_max_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_s_h() { #[rustfmt::skip] - let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let a = i16x8::new(1, 
-2, 3, -4, 1, -2, 3, -4); #[rustfmt::skip] - let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); - let r = i16x8(1, 7, 3, 9, 1, 7, 3, 9); + let b = i16x8::new(-6, 7, -8, 9, -6, 7, -8, 9); + #[rustfmt::skip] + let r = i16x8::new(1, 7, 3, 9, 1, 7, 3, 9); - assert_eq!(r, __msa_max_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_s_w() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); #[rustfmt::skip] - let b = i32x4(6, 7, 8, 9); - let r = i32x4(6, 7, 8, 9); + let b = i32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = i32x4::new(6, 7, 8, 9); - assert_eq!(r, __msa_max_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_s_d() { #[rustfmt::skip] - let a = i64x2(-1, 2); + let a = i64x2::new(-1, 2); #[rustfmt::skip] - let b = i64x2(6, -7); - let r = i64x2(6, 2); + let b = i64x2::new(6, -7); + #[rustfmt::skip] + let r = i64x2::new(6, 2); - assert_eq!(r, __msa_max_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - assert_eq!(r, __msa_max_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_u_h() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(6, 7, 8, 9, 6, 7, 8, 9); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); - assert_eq!(r, __msa_max_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_u_w() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(6, 7, 8, 9); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(6, 7, 8, 9); - assert_eq!(r, __msa_max_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_max_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); + #[rustfmt::skip] + let b = u64x2::new(6, 7); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(6, 7); + let r = u64x2::new(6, 7); - assert_eq!(r, __msa_max_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_max_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_maxi_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, -20, -6, 8, 1, -20, -6, 8, 1, -20, -6, 8, 1, -20, -6, 8 ); - - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 1, -16, -6, 8, 1, -16, -6, 8, 1, -16, -6, 8, 1, -16, -6, 8 ); - assert_eq!(r, __msa_maxi_s_b(a, -16)); + assert_eq!( + r, + ::mem::transmute(__msa_maxi_s_b(::mem::transmute(a), -16)) + ); } 
#[simd_test(enable = "msa")] unsafe fn test_msa_maxi_s_h() { #[rustfmt::skip] - let a = i16x8(1, 3, -60, -8, 1, 3, -6, -8); - let r = i16x8(15, 15, 15, 15, 15, 15, 15, 15); + let a = i16x8::new(1, 3, -60, -8, 1, 3, -6, -8); + #[rustfmt::skip] + let r = i16x8::new(15, 15, 15, 15, 15, 15, 15, 15); - assert_eq!(r, __msa_maxi_s_h(a, 15)); + assert_eq!(r, ::mem::transmute(__msa_maxi_s_h(::mem::transmute(a), 15))); } #[simd_test(enable = "msa")] unsafe fn test_msa_maxi_s_w() { #[rustfmt::skip] - let a = i32x4(1, 3, -6, -8); - let r = i32x4(1, 3, -5, -5); + let a = i32x4::new(1, 3, -6, -8); + #[rustfmt::skip] + let r = i32x4::new(1, 3, -5, -5); - assert_eq!(r, __msa_maxi_s_w(a, -5)); + assert_eq!(r, ::mem::transmute(__msa_maxi_s_w(::mem::transmute(a), -5))); } // FIXME: https://reviews.llvm.org/D59884 @@ -14101,214 +15381,254 @@ mod tests { // #[simd_test(enable = "msa")] // unsafe fn test_msa_maxi_s_d() { // #[rustfmt::skip] - // let a = i64x2(1, -8); - - // let r = i64x2(-3, -3); + // let a = i64x2::new(1, -8); + // #[rustfmt::skip] + // let r = i64x2::new(-3, -3); - // assert_eq!(r, __msa_maxi_s_d(a, -3)); + // assert_eq!(r, ::mem::transmute(__msa_maxi_s_d(::mem::transmute(a), -3))); // } #[simd_test(enable = "msa")] unsafe fn test_msa_maxi_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 3, 6, 8, 1, 3, 6, 8, 1, 3, 6, 8, 1, 3, 6, 8 ); - - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 5, 5, 6, 8, 5, 5, 6, 8, 5, 5, 6, 8, 5, 5, 6, 8 ); - assert_eq!(r, __msa_maxi_u_b(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_maxi_u_b(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_maxi_u_h() { #[rustfmt::skip] - let a = u16x8(1, 3, 6, 8, 1, 3, 6, 8); - let r = u16x8(5, 5, 6, 8, 5, 5, 6, 8); + let a = u16x8::new(1, 3, 6, 8, 1, 3, 6, 8); + #[rustfmt::skip] + let r = u16x8::new(5, 5, 6, 8, 5, 5, 6, 8); - assert_eq!(r, __msa_maxi_u_h(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_maxi_u_h(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_maxi_u_w() { #[rustfmt::skip] - let a = u32x4(1, 3, 6, 8); - let r = u32x4(5, 5, 6, 8); + let a = u32x4::new(1, 3, 6, 8); + #[rustfmt::skip] + let r = u32x4::new(5, 5, 6, 8); - assert_eq!(r, __msa_maxi_u_w(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_maxi_u_w(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_maxi_u_d() { #[rustfmt::skip] - let a = u64x2(1, 8); - let r = u64x2(5, 8); + let a = u64x2::new(1, 8); + #[rustfmt::skip] + let r = u64x2::new(5, 8); - assert_eq!(r, __msa_maxi_u_d(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_maxi_u_d(::mem::transmute(a), 5))); } - #[simd_test(enable = "msa")] unsafe fn test_msa_min_a_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, -7, -8, -9, 6, 7, 8, 9, -6, -7, -8, -9, 6, 7, 8, 9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); - assert_eq!(r, __msa_min_a_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_a_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_a_h() { #[rustfmt::skip] - let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let a = i16x8::new(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8::new(-6, 7, -8, 9, -6, 7, -8, 9); #[rustfmt::skip] - let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); - let r = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let r = 
i16x8::new(1, -2, 3, -4, 1, -2, 3, -4); - assert_eq!(r, __msa_min_a_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_a_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_a_w() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4::new(6, 7, 8, 9); #[rustfmt::skip] - let b = i32x4(6, 7, 8, 9); - let r = i32x4(1, -2, 3, -4); + let r = i32x4::new(1, -2, 3, -4); - assert_eq!(r, __msa_min_a_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_a_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_a_d() { #[rustfmt::skip] - let a = i64x2(-1, 2); + let a = i64x2::new(-1, 2); + #[rustfmt::skip] + let b = i64x2::new(6, -7); #[rustfmt::skip] - let b = i64x2(6, -7); - let r = i64x2(-1, 2); + let r = i64x2::new(-1, 2); - assert_eq!(r, __msa_min_a_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_a_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( -6, -7, -8, -9, 6, 7, 8, 9, -6, -7, -8, -9, 6, 7, 8, 9 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -6, -7, -8, -9, -1, -2, -3, -4, -6, -7, -8, -9, -1, -2, -3, -4 ); - assert_eq!(r, __msa_min_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_s_h() { #[rustfmt::skip] - let a = i16x8(1, -2, 3, -4, 1, -2, 3, -4); + let a = i16x8::new(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let b = i16x8::new(-6, 7, -8, 9, -6, 7, -8, 9); #[rustfmt::skip] - let b = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); - let r = i16x8(-6, -2, -8, -4, -6, -2, -8, -4); + let r = i16x8::new(-6, -2, -8, -4, -6, -2, -8, -4); - assert_eq!(r, __msa_min_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_s_w() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); + let a = i32x4::new(1, -2, 3, -4); + #[rustfmt::skip] + let b = i32x4::new(6, 7, 8, 9); #[rustfmt::skip] - let b = i32x4(6, 7, 8, 9); - let r = i32x4(1, -2, 3, -4); + let r = i32x4::new(1, -2, 3, -4); - assert_eq!(r, __msa_min_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_s_d() { #[rustfmt::skip] - let a = i64x2(-1, 2); + let a = i64x2::new(-1, 2); + #[rustfmt::skip] + let b = i64x2::new(6, -7); #[rustfmt::skip] - let b = i64x2(6, -7); - let r = i64x2(-1, -7); + let r = i64x2::new(-1, -7); - assert_eq!(r, __msa_min_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10 ); - assert_eq!(r, __msa_mini_s_b(a, -10)); + assert_eq!( + r, + ::mem::transmute(__msa_mini_s_b(::mem::transmute(a), -10)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_s_h() { #[rustfmt::skip] - let a = i16x8(1, -2, 3, -4, 
1, -2, 3, -4); - let r = i16x8(-3, -3, -3, -4, -3, -3, -3, -4); + let a = i16x8::new(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let r = i16x8::new(-3, -3, -3, -4, -3, -3, -3, -4); - assert_eq!(r, __msa_mini_s_h(a, -3)); + assert_eq!(r, ::mem::transmute(__msa_mini_s_h(::mem::transmute(a), -3))); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_s_w() { #[rustfmt::skip] - let a = i32x4(1, -2, 3, -4); - let r = i32x4(-3, -3, -3, -4); + let a = i32x4::new(1, -2, 3, -4); + #[rustfmt::skip] + let r = i32x4::new(-3, -3, -3, -4); - assert_eq!(r, __msa_mini_s_w(a, -3)); + assert_eq!(r, ::mem::transmute(__msa_mini_s_w(::mem::transmute(a), -3))); } // FIXME: https://reviews.llvm.org/D59884 @@ -14317,1830 +15637,2170 @@ mod tests { // #[simd_test(enable = "msa")] // unsafe fn test_msa_mini_s_d() { // #[rustfmt::skip] - // let a = i64x2(-3, 2); - // let r = i64x2(-1, -3); + // let a = i64x2::new(-3, 2); + // #[rustfmt::skip] + // let r = i64x2::new(-1, -3); - // assert_eq!(r, __msa_mini_s_d(a, -3)); + // assert_eq!(r, ::mem::transmute(__msa_mini_s_d(::mem::transmute(a), -3))); // } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_min_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - assert_eq!(r, __msa_min_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_u_h() { #[rustfmt::skip] - let a = u16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - let r = u16x8(1, 2, 3, 4, 1, 2, 3, 4,); + let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let r = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4,); - assert_eq!(r, __msa_min_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_u_w() { #[rustfmt::skip] - let a = u32x4(1, 2, 3, 4); + let a = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(6, 7, 8, 9); - let r = u32x4(1, 2, 3, 4,); + let b = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let r = u32x4::new(1, 2, 3, 4,); - assert_eq!(r, __msa_min_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_min_u_d() { #[rustfmt::skip] - let a = u64x2(1, 2); + let a = u64x2::new(1, 2); #[rustfmt::skip] - let b = u64x2(6, 7); - let r = u64x2(1, 2,); + let b = u64x2::new(6, 7); + #[rustfmt::skip] + let r = u64x2::new(1, 2,); - assert_eq!(r, __msa_min_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_min_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 3, 6, 8, 1, 3, 6, 8, 1, 3, 6, 8, 1, 3, 6, 8 ); - - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 1, 3, 5, 5, 1, 3, 5, 5, 1, 3, 5, 5, 1, 3, 5, 5 ); - assert_eq!(r, __msa_mini_u_b(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_mini_u_b(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_u_h() { #[rustfmt::skip] - let a = u16x8(1, 3, 6, 8, 1, 3, 6, 8); - let r = u16x8(1, 3, 
5, 5, 1, 3, 5, 5); + let a = u16x8::new(1, 3, 6, 8, 1, 3, 6, 8); + #[rustfmt::skip] + let r = u16x8::new(1, 3, 5, 5, 1, 3, 5, 5); - assert_eq!(r, __msa_mini_u_h(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_mini_u_h(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_u_w() { #[rustfmt::skip] - let a = u32x4(1, 3, 6, 8); - let r = u32x4(1, 3, 5, 5); + let a = u32x4::new(1, 3, 6, 8); + #[rustfmt::skip] + let r = u32x4::new(1, 3, 5, 5); - assert_eq!(r, __msa_mini_u_w(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_mini_u_w(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_mini_u_d() { #[rustfmt::skip] - let a = u64x2(1, 8); - let r = u64x2(1, 5); + let a = u64x2::new(1, 8); + #[rustfmt::skip] + let r = u64x2::new(1, 5); - assert_eq!(r, __msa_mini_u_d(a, 5)); + assert_eq!(r, ::mem::transmute(__msa_mini_u_d(::mem::transmute(a), 5))); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -6, -7, -8, -9, 6, 7, 8, 9, -6, -7, -8, -9, 6, 7, 8, 9 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 1, 2, 3, 4, -1, -2, -3, -4, 1, 2, 3, 4, -1, -2, -3, -4 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 0, -1, -2, -1, 0, 1, 2, 1, 0, -1, -2, -1, 0, 1, 2, 1 ); - assert_eq!(r, __msa_mod_s_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_s_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_s_h() { #[rustfmt::skip] - let a = i16x8(-6, 7, -8, 9, -6, 7, -8, 9); + let a = i16x8::new(-6, 7, -8, 9, -6, 7, -8, 9); #[rustfmt::skip] - let b = i16x8(1, -2, 3, -4, 1, -2, 3, -4); - let r = i16x8(0, 1, -2, 1, 0, 1, -2, 1); + let b = i16x8::new(1, -2, 3, -4, 1, -2, 3, -4); + #[rustfmt::skip] + let r = i16x8::new(0, 1, -2, 1, 0, 1, -2, 1); - assert_eq!(r, __msa_mod_s_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_s_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_s_w() { #[rustfmt::skip] - let a = i32x4(6, 7, 8, 9); + let a = i32x4::new(6, 7, 8, 9); #[rustfmt::skip] - let b = i32x4(1, -2, 3, -4); - let r = i32x4(0, 1, 2, 1); + let b = i32x4::new(1, -2, 3, -4); + #[rustfmt::skip] + let r = i32x4::new(0, 1, 2, 1); - assert_eq!(r, __msa_mod_s_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_s_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_s_d() { #[rustfmt::skip] - let a = i64x2(6, -7); + let a = i64x2::new(6, -7); #[rustfmt::skip] - let b = i64x2(-1, 2); - let r = i64x2(0, -1); + let b = i64x2::new(-1, 2); + #[rustfmt::skip] + let r = i64x2::new(0, -1); - assert_eq!(r, __msa_mod_s_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_s_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9, 6, 7, 8, 9 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 0, 1, 2, 1, 0, 1, 2, 1, 0, 1, 2, 1, 0, 1, 2, 1 ); - assert_eq!(r, __msa_mod_u_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_u_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_u_h() { #[rustfmt::skip] - let a = u16x8(6, 7, 8, 9, 6, 7, 8, 9); - #[rustfmt::skip] - let b = u16x8(1, 2, 3, 4, 1, 2, 3, 4); - let r = u16x8(0, 1, 
2, 1, 0, 1, 2, 1); + let a = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9); + #[rustfmt::skip] + let b = u16x8::new(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let r = u16x8::new(0, 1, 2, 1, 0, 1, 2, 1); - assert_eq!(r, __msa_mod_u_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_u_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_u_w() { #[rustfmt::skip] - let a = u32x4(6, 7, 8, 9); + let a = u32x4::new(6, 7, 8, 9); + #[rustfmt::skip] + let b = u32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = u32x4(1, 2, 3, 4); - let r = u32x4(0, 1, 2, 1); + let r = u32x4::new(0, 1, 2, 1); - assert_eq!(r, __msa_mod_u_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_u_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mod_u_d() { #[rustfmt::skip] - let a = u64x2(6, 7); + let a = u64x2::new(6, 7); + #[rustfmt::skip] + let b = u64x2::new(1, 2); #[rustfmt::skip] - let b = u64x2(1, 2); - let r = u64x2(0, 1); + let r = u64x2::new(0, 1); - assert_eq!(r, __msa_mod_u_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mod_u_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_move_v() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8 ); #[rustfmt::skip] - let r = i8x16( + let r = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8 ); - assert_eq!(r, __msa_move_v(a)); + assert_eq!(r, ::mem::transmute(__msa_move_v(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_msub_q_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1024, -1024, 1024, -1024, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 1025, 1025, 1025, 1025, 1025, 1025, 1025, 1025 ); #[rustfmt::skip] - let c = i16x8( + let c = i16x8::new( 1024, 2048, 3072, 4096, 1024, 2048, 3072, 4096 ); #[rustfmt::skip] - let r = i16x8(991, -1089, 927, -1153, -32, -63, -94, -125); + let r = i16x8::new(991, -1089, 927, -1153, -32, -63, -94, -125); - assert_eq!(r, __msa_msub_q_h(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msub_q_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_msub_q_w() { #[rustfmt::skip] - let a = i32x4(2147483647, -2147483647, 1, 2); + let a = i32x4::new(2147483647, -2147483647, 1, 2); #[rustfmt::skip] - let b = i32x4(10240, 10240, 10240, 10240); + let b = i32x4::new(10240, 10240, 10240, 10240); #[rustfmt::skip] - let c = i32x4(10240, 20480, 30720, 40960); + let c = i32x4::new(10240, 20480, 30720, 40960); #[rustfmt::skip] - let r = i32x4(2147483646, -2147483648, 0, 1); + let r = i32x4::new(2147483646, -2147483648, 0, 1); - assert_eq!(r, __msa_msub_q_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msub_q_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_msubr_q_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1024, -1024, 1024, -1024, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 1025, 1025, 1025, 1025, 1025, 1025, 1025, 1025 ); #[rustfmt::skip] - let c = i16x8( + let c = i16x8::new( 1024, 2048, 3072, 4096, 1024, 2048, 3072, 4096 ); #[rustfmt::skip] - let r = i16x8(992, -1088, 928, -1152, -31, -62, -93, -124); + let r = i16x8::new(992, -1088, 928, -1152, -31, -62, -93, -124); - assert_eq!(r, __msa_msubr_q_h(a,b,c)); + assert_eq!( + r, + 
::mem::transmute(__msa_msubr_q_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_msubr_q_w() { #[rustfmt::skip] - let a = i32x4(i32::max_value(), -2147483647, 1, 2); - let b = i32x4(10240, 10240, 10240, 10240); - let c = i32x4(10240, 20480, 30720, 40960); + let a = i32x4::new(i32::max_value(), -2147483647, 1, 2); + #[rustfmt::skip] + let b = i32x4::new(10240, 10240, 10240, 10240); #[rustfmt::skip] - let r = i32x4(2147483647, -2147483647, 1, 2); + let c = i32x4::new(10240, 20480, 30720, 40960); + #[rustfmt::skip] + let r = i32x4::new(2147483647, -2147483647, 1, 2); - assert_eq!(r, __msa_msubr_q_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msubr_q_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_msubv_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - let b = i8x16( + #[rustfmt::skip] + let b = i8x16::new( 5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8 ); - let c = i8x16( + #[rustfmt::skip] + let c = i8x16::new( 9, 10, 11, 12, 9, 10, 11, 12, 9, 10, 11, 12, 9, 10, 11, 12 ); #[rustfmt::skip] - let r = i8x16( + let r = i8x16::new( -44, -58, -74, -92, -44, -58, -74, -92, -44, -58, -74, -92, -44, -58, -74, -92 ); - assert_eq!(r, __msa_msubv_b(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msubv_b( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_msubv_h() { #[rustfmt::skip] - let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); - let b = i16x8(5, 6, 7, 8, 5, 6, 7, 8); - let c = i16x8(9, 10, 11, 12, 9, 10, 11, 12); + let a = i16x8::new(1, 2, 3, 4, 1, 2, 3, 4); + #[rustfmt::skip] + let b = i16x8::new(5, 6, 7, 8, 5, 6, 7, 8); #[rustfmt::skip] - let r = i16x8(-44, -58, -74, -92, -44, -58, -74, -92); + let c = i16x8::new(9, 10, 11, 12, 9, 10, 11, 12); + #[rustfmt::skip] + let r = i16x8::new(-44, -58, -74, -92, -44, -58, -74, -92); - assert_eq!(r, __msa_msubv_h(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msubv_h( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_msubv_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 1, 2); - let b = i32x4(3, 4, 3, 4); - let c = i32x4(5, 6, 5, 6); + let a = i32x4::new(1, 2, 1, 2); + #[rustfmt::skip] + let b = i32x4::new(3, 4, 3, 4); + #[rustfmt::skip] + let c = i32x4::new(5, 6, 5, 6); #[rustfmt::skip] - let r = i32x4(-14, -22, -14, -22); + let r = i32x4::new(-14, -22, -14, -22); - assert_eq!(r, __msa_msubv_w(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msubv_w( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_msubv_d() { #[rustfmt::skip] - let a = i64x2(1, 2); - let b = i64x2(3, 4); - let c = i64x2(5, 6); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let b = i64x2::new(3, 4); #[rustfmt::skip] - let r = i64x2(-14, -22); + let c = i64x2::new(5, 6); + #[rustfmt::skip] + let r = i64x2::new(-14, -22); - assert_eq!(r, __msa_msubv_d(a,b,c)); + assert_eq!( + r, + ::mem::transmute(__msa_msubv_d( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c) + )) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mul_q_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 12500, -20, -300, 400, 12500, 20, 300, 400 ); - let b = i16x8( + #[rustfmt::skip] + let b = i16x8::new( 1250, 10240, 
-7585, 8456, 1250, 10240, -7585, 8456 ); #[rustfmt::skip] - let r = i16x8(476, -7, 69, 103, 476, 6, -70, 103); + let r = i16x8::new(476, -7, 69, 103, 476, 6, -70, 103); - assert_eq!(r, __msa_mul_q_h(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_mul_q_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mul_q_w() { #[rustfmt::skip] - let a = i32x4( + let a = i32x4::new( i32::max_value(), i32::max_value(), i32::min_value(), i32::min_value() ); - let b = i32x4(30, 60, 30, 60); #[rustfmt::skip] - let r = i32x4(29, 59, -30, -60); + let b = i32x4::new(30, 60, 30, 60); + #[rustfmt::skip] + let r = i32x4::new(29, 59, -30, -60); - assert_eq!(r, __msa_mul_q_w(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_mul_q_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mulr_q_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 12500, -20, -300, 400, 12500, 20, 300, 400 ); - let b = i16x8( + #[rustfmt::skip] + let b = i16x8::new( 1250, 10240, -7585, 8456, 1250, 10240, -7585, 8456 ); #[rustfmt::skip] - let r = i16x8(477, -6, 69, 103, 477, 6, -69, 103); + let r = i16x8::new(477, -6, 69, 103, 477, 6, -69, 103); - assert_eq!(r, __msa_mulr_q_h(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_mulr_q_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mulr_q_w() { #[rustfmt::skip] - let a = i32x4( + let a = i32x4::new( i32::max_value(), i32::max_value(), i32::min_value(), i32::min_value() ); - let b = i32x4(30, 60, 30, 60); #[rustfmt::skip] - let r = i32x4(30, 60, -30, -60); + let b = i32x4::new(30, 60, 30, 60); + #[rustfmt::skip] + let r = i32x4::new(30, 60, -30, -60); - assert_eq!(r, __msa_mulr_q_w(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_mulr_q_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mulv_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 16, 30, 42, 52, 60, 66, 70, 72, 72, 70, 66, 60, 52, 42, 30, 16 ); - assert_eq!(r, __msa_mulv_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mulv_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mulv_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 5, 6, 7, 8 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i16x8(8, 14, 18, 20, 20, 18, 14, 8); + #[rustfmt::skip] + let r = i16x8::new(8, 14, 18, 20, 20, 18, 14, 8); - assert_eq!(r, __msa_mulv_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mulv_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mulv_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(4, 6, 6, 4); + let b = i32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = i32x4::new(4, 6, 6, 4); - assert_eq!(r, __msa_mulv_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mulv_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_mulv_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(2, 1); - let r = i64x2(2, 2); + let b = 
i64x2::new(2, 1); + #[rustfmt::skip] + let r = i64x2::new(2, 2); - assert_eq!(r, __msa_mulv_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_mulv_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_nloc_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, -64, -32, -16, -8, -4, -2, -1, 1, 2, 4, 8, 16, 32, 64, 127 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0 ); - assert_eq!(r, __msa_nloc_b(a)); + assert_eq!(r, ::mem::transmute(__msa_nloc_b(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nloc_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( -32768, -16384, -8192, -4096, 4096, 8192, 16384, 32767 ); - let r = i16x8(1, 2, 3, 4, 0, 0, 0, 0); + #[rustfmt::skip] + let r = i16x8::new(1, 2, 3, 4, 0, 0, 0, 0); - assert_eq!(r, __msa_nloc_h(a)); + assert_eq!(r, ::mem::transmute(__msa_nloc_h(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nloc_w() { #[rustfmt::skip] - let a = i32x4( + let a = i32x4::new( i32::min_value(), -1073741824, 1073741824, i32::max_value() ); - let r = i32x4(1, 2, 0, 0); + #[rustfmt::skip] + let r = i32x4::new(1, 2, 0, 0); - assert_eq!(r, __msa_nloc_w(a)); + assert_eq!(r, ::mem::transmute(__msa_nloc_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nloc_d() { #[rustfmt::skip] - let a = i64x2(i64::min_value(), i64::max_value()); - let r = i64x2(1, 0); + let a = i64x2::new(i64::min_value(), i64::max_value()); + #[rustfmt::skip] + let r = i64x2::new(1, 0); - assert_eq!(r, __msa_nloc_d(a)); + assert_eq!(r, ::mem::transmute(__msa_nloc_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nlzc_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3 ); - assert_eq!(r, __msa_nlzc_b(a)); + assert_eq!(r, ::mem::transmute(__msa_nlzc_b(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nlzc_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 5, 6, 7, 8 ); - let r = i16x8(15, 14, 14, 13, 13, 13, 13, 12); + #[rustfmt::skip] + let r = i16x8::new(15, 14, 14, 13, 13, 13, 13, 12); - assert_eq!(r, __msa_nlzc_h(a)); + assert_eq!(r, ::mem::transmute(__msa_nlzc_h(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nlzc_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); - let r = i32x4(31, 30, 30, 29); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = i32x4::new(31, 30, 30, 29); - assert_eq!(r, __msa_nlzc_w(a)); + assert_eq!(r, ::mem::transmute(__msa_nlzc_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nlzc_d() { #[rustfmt::skip] - let a = i64x2(1, 2); - let r = i64x2(63, 62); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let r = i64x2::new(63, 62); - assert_eq!(r, __msa_nlzc_d(a)); + assert_eq!(r, ::mem::transmute(__msa_nlzc_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_nor_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239 ); - assert_eq!(r, 
__msa_nor_v(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_nor_v(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_nori_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 250, 249, 248, 251, 250, 249, 248, 243, 242, 241, 240, 243, 242, 241, 240, 235 ); - assert_eq!(r, __msa_nori_b(a, 4)); + assert_eq!(r, ::mem::transmute(__msa_nori_b(::mem::transmute(a), 4))); } #[simd_test(enable = "msa")] unsafe fn test_msa_or_v() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); #[rustfmt::skip] - let b = u8x16( + let b = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); - assert_eq!(r, __msa_or_v(a,b)); + assert_eq!( + r, + ::mem::transmute(__msa_or_v(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_ori_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 5, 6, 7, 4, 5, 6, 7, 12, 13, 14, 15, 12, 13, 14, 15, 20 ); - assert_eq!(r, __msa_ori_b(a, 4)); + assert_eq!(r, ::mem::transmute(__msa_ori_b(::mem::transmute(a), 4))); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckev_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 4, 2, 4, 2, 4, 2, 4, 2, 1, 3, 1, 3, 1, 3, 1, 3 ); - assert_eq!(r, __msa_pckev_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckev_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckev_h() { #[rustfmt::skip] - let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = i16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = i16x8(4, 3, 2, 1, 4, 3, 2, 1); - let r = i16x8(4, 2, 4, 2, 1, 3, 1, 3); + let b = i16x8::new(4, 3, 2, 1, 4, 3, 2, 1); + #[rustfmt::skip] + let r = i16x8::new(4, 2, 4, 2, 1, 3, 1, 3); - assert_eq!(r, __msa_pckev_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckev_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckev_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(4, 2, 1, 3); + let b = i32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = i32x4::new(4, 2, 1, 3); - assert_eq!(r, __msa_pckev_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckev_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckev_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(4, 3); - let r = i64x2(4, 1); + let b = i64x2::new(4, 3); + #[rustfmt::skip] + let r = i64x2::new(4, 1); - assert_eq!(r, __msa_pckev_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckev_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckod_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i8x16( 
+ let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 3, 1, 3, 1, 3, 1, 3, 1, 2, 4, 2, 4, 2, 4, 2, 4 ); - assert_eq!(r, __msa_pckod_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckod_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckod_h() { #[rustfmt::skip] - let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = i16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = i16x8(4, 3, 2, 1, 4, 3, 2, 1); - let r = i16x8(3, 1, 3, 1, 2, 4, 2, 4); + let b = i16x8::new(4, 3, 2, 1, 4, 3, 2, 1); + #[rustfmt::skip] + let r = i16x8::new(3, 1, 3, 1, 2, 4, 2, 4); - assert_eq!(r, __msa_pckod_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckod_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckod_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(3, 1, 2, 4); + let b = i32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = i32x4::new(3, 1, 2, 4); - assert_eq!(r, __msa_pckod_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckod_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pckod_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(4, 3); - let r = i64x2(3, 2); + let b = i64x2::new(4, 3); + #[rustfmt::skip] + let r = i64x2::new(3, 2); - assert_eq!(r, __msa_pckod_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_pckod_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_pcnt_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, -64, -32, -16, -8, -4, -2, -1, 1, 2, 4, 8, 16, 32, 64, 127 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 1, 2, 3, 4, 5, 6, 7, 8, 1, 1, 1, 1, 1, 1, 1, 7 ); - assert_eq!(r, __msa_pcnt_b(a)); + assert_eq!(r, ::mem::transmute(__msa_pcnt_b(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_pcnt_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( -32768, -16384, -8192, -4096, 4096, 8192, 16384, 32767 ); - let r = i16x8(1, 2, 3, 4, 1, 1, 1, 15); + #[rustfmt::skip] + let r = i16x8::new(1, 2, 3, 4, 1, 1, 1, 15); - assert_eq!(r, __msa_pcnt_h(a)); + assert_eq!(r, ::mem::transmute(__msa_pcnt_h(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_pcnt_w() { #[rustfmt::skip] - let a = i32x4( + let a = i32x4::new( i32::min_value(), -1073741824, 1073741824, i32::max_value() ); - let r = i32x4(1, 2, 1, 31); + #[rustfmt::skip] + let r = i32x4::new(1, 2, 1, 31); - assert_eq!(r, __msa_pcnt_w(a)); + assert_eq!(r, ::mem::transmute(__msa_pcnt_w(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_pcnt_d() { #[rustfmt::skip] - let a = i64x2(-2147483648, 2147483647); - let r = i64x2(33, 31); + let a = i64x2::new(-2147483648, 2147483647); + #[rustfmt::skip] + let r = i64x2::new(33, 31); - assert_eq!(r, __msa_pcnt_d(a)); + assert_eq!(r, ::mem::transmute(__msa_pcnt_d(::mem::transmute(a)))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_s_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( i8::max_value(), 105, 30, 1, i8::max_value(), 105, 30, 1, i8::max_value(), 105, 30, 1, i8::max_value(), 105, 30, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1 ); - assert_eq!(r, __msa_sat_s_b(a, 
2)); + assert_eq!(r, ::mem::transmute(__msa_sat_s_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_s_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 1155, 155, 1, i16::max_value(), 1155, 155, 1 ); - let r = i16x8(127, 127, 127, 1, 127, 127, 127, 1); + #[rustfmt::skip] + let r = i16x8::new(127, 127, 127, 1, 127, 127, 127, 1); - assert_eq!(r, __msa_sat_s_h(a, 7)); + assert_eq!(r, ::mem::transmute(__msa_sat_s_h(::mem::transmute(a), 7))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_s_w() { #[rustfmt::skip] - let a = i32x4(i32::max_value(), 111111155, i32::max_value(), 1); - let r = i32x4(131071, 131071, 131071, 1); + let a = i32x4::new(i32::max_value(), 111111155, i32::max_value(), 1); + #[rustfmt::skip] + let r = i32x4::new(131071, 131071, 131071, 1); - assert_eq!(r, __msa_sat_s_w(a, 17)); + assert_eq!(r, ::mem::transmute(__msa_sat_s_w(::mem::transmute(a), 17))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_s_d() { #[rustfmt::skip] - let a = i64x2(i64::max_value(), 1); - let r = i64x2(137438953471, 1); + let a = i64x2::new(i64::max_value(), 1); + #[rustfmt::skip] + let r = i64x2::new(137438953471, 1); - assert_eq!(r, __msa_sat_s_d(a, 37)); + assert_eq!(r, ::mem::transmute(__msa_sat_s_d(::mem::transmute(a), 37))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_u_b() { #[rustfmt::skip] - let a = u8x16( + let a = u8x16::new( u8::max_value(), 105, 30, 1, u8::max_value(), 105, 30, 1, u8::max_value(), 105, 30, 1, u8::max_value(), 105, 30, 1 ); - let r = u8x16( + #[rustfmt::skip] + let r = u8x16::new( 7, 7, 7, 1, 7, 7, 7, 1, 7, 7, 7, 1, 7, 7, 7, 1 ); - assert_eq!(r, __msa_sat_u_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_sat_u_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_u_h() { #[rustfmt::skip] - let a = u16x8( + let a = u16x8::new( u16::max_value(), 1155, 155, 1, u16::max_value(), 1155, 155, 1 ); - let r = u16x8(255, 255, 155, 1, 255, 255, 155, 1); + #[rustfmt::skip] + let r = u16x8::new(255, 255, 155, 1, 255, 255, 155, 1); - assert_eq!(r, __msa_sat_u_h(a, 7)); + assert_eq!(r, ::mem::transmute(__msa_sat_u_h(::mem::transmute(a), 7))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_u_w() { #[rustfmt::skip] - let a = u32x4(u32::max_value(), 111111155, u32::max_value(), 1); - let r = u32x4(262143, 262143, 262143, 1); + let a = u32x4::new(u32::max_value(), 111111155, u32::max_value(), 1); + #[rustfmt::skip] + let r = u32x4::new(262143, 262143, 262143, 1); - assert_eq!(r, __msa_sat_u_w(a, 17)); + assert_eq!(r, ::mem::transmute(__msa_sat_u_w(::mem::transmute(a), 17))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sat_u_d() { #[rustfmt::skip] - let a = u64x2(u64::max_value(), 1); - let r = u64x2(274877906943, 1); + let a = u64x2::new(u64::max_value(), 1); + #[rustfmt::skip] + let r = u64x2::new(274877906943, 1); - assert_eq!(r, __msa_sat_u_d(a, 37)); + assert_eq!(r, ::mem::transmute(__msa_sat_u_d(::mem::transmute(a), 37))); } #[simd_test(enable = "msa")] unsafe fn test_msa_shf_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 11, 12, 3, 4, 11, 12, 3, 4, 11, 12, 3, 4, 11, 12, 3, 4 ); - - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 11, 3, 4, 12, 11, 3, 4, 12, 11, 3, 4, 12, 11, 3, 4, 12 ); - assert_eq!(r, __msa_shf_b(a, 120)); + assert_eq!(r, ::mem::transmute(__msa_shf_b(::mem::transmute(a), 120))); } #[simd_test(enable = "msa")] unsafe fn test_msa_shf_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 11, 12, 13, 14, 11, 12, 13, 
14 ); + #[rustfmt::skip] + let r = i16x8::new(11, 14, 12, 13, 11, 14, 12, 13); - let r = i16x8(11, 14, 12, 13, 11, 14, 12, 13); - - assert_eq!(r, __msa_shf_h(a, 156)); + assert_eq!(r, ::mem::transmute(__msa_shf_h(::mem::transmute(a), 156))); } #[simd_test(enable = "msa")] unsafe fn test_msa_shf_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); - - let r = i32x4(1, 3, 2, 4); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = i32x4::new(1, 3, 2, 4); - assert_eq!(r, __msa_shf_w(a, 216)); + assert_eq!(r, ::mem::transmute(__msa_shf_w(::mem::transmute(a), 216))); } #[simd_test(enable = "msa")] unsafe fn test_msa_sld_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4 ); - assert_eq!(r, __msa_sld_b(a, b, 5)); + assert_eq!( + r, + ::mem::transmute(__msa_sld_b(::mem::transmute(a), ::mem::transmute(b), 5)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sld_h() { #[rustfmt::skip] - let a = i16x8(0, 1, 2, 3, 4, 5, 6, 7); + let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); #[rustfmt::skip] - let b = i16x8(8, 9, 10, 11, 12, 13, 14, 15); + let b = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15); // let c = 5 as i32; - let r = i16x8(9, 10, 11, 0, 13, 14, 15, 4); + #[rustfmt::skip] + let r = i16x8::new(9, 10, 11, 0, 13, 14, 15, 4); - assert_eq!(r, __msa_sld_h(a, b, 2)); + assert_eq!( + r, + ::mem::transmute(__msa_sld_h(::mem::transmute(a), ::mem::transmute(b), 2)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sld_w() { #[rustfmt::skip] - let a = i32x4(0, 1, 2, 3); + let a = i32x4::new(0, 1, 2, 3); #[rustfmt::skip] - let b = i32x4(4, 5, 6, 7); - - let r = i32x4(4, 5, 6, 7); + let b = i32x4::new(4, 5, 6, 7); + #[rustfmt::skip] + let r = i32x4::new(4, 5, 6, 7); - assert_eq!(r, __msa_sld_w(a, b, 4)); + assert_eq!( + r, + ::mem::transmute(__msa_sld_w(::mem::transmute(a), ::mem::transmute(b), 4)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sld_d() { #[rustfmt::skip] - let a = i64x2(0, 1); + let a = i64x2::new(0, 1); #[rustfmt::skip] - let b = i64x2(2, 3); - - let r = i64x2(2, 3); + let b = i64x2::new(2, 3); + #[rustfmt::skip] + let r = i64x2::new(2, 3); - assert_eq!(r, __msa_sld_d(a, b, 2)); + assert_eq!( + r, + ::mem::transmute(__msa_sld_d(::mem::transmute(a), ::mem::transmute(b), 2)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sldi_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4 ); - assert_eq!(r, __msa_sldi_b(a, b, 5)); + assert_eq!( + r, + ::mem::transmute(__msa_sldi_b(::mem::transmute(a), ::mem::transmute(b), 5)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sldi_h() { #[rustfmt::skip] - let a = i16x8(0, 1, 2, 3, 4, 5, 6, 7); + let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7); #[rustfmt::skip] - let b = i16x8(8, 9, 10, 11, 12, 13, 14, 15); + let b = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15); // let c = 5 as i32; - let r = i16x8(9, 10, 11, 0, 13, 14, 15, 4); + #[rustfmt::skip] + let r = i16x8::new(9, 10, 11, 0, 13, 14, 15, 4); - assert_eq!(r, __msa_sldi_h(a, b, 2)); + assert_eq!( + r, + 
::mem::transmute(__msa_sldi_h(::mem::transmute(a), ::mem::transmute(b), 2)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sldi_w() { #[rustfmt::skip] - let a = i32x4(0, 1, 2, 3); + let a = i32x4::new(0, 1, 2, 3); + #[rustfmt::skip] + let b = i32x4::new(4, 5, 6, 7); #[rustfmt::skip] - let b = i32x4(4, 5, 6, 7); - - let r = i32x4(4, 5, 6, 7); + let r = i32x4::new(4, 5, 6, 7); - assert_eq!(r, __msa_sldi_w(a, b, 4)); + assert_eq!( + r, + ::mem::transmute(__msa_sldi_w(::mem::transmute(a), ::mem::transmute(b), 4)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sldi_d() { #[rustfmt::skip] - let a = i64x2(0, 1); + let a = i64x2::new(0, 1); #[rustfmt::skip] - let b = i64x2(2, 3); - - let r = i64x2(2, 3); + let b = i64x2::new(2, 3); + #[rustfmt::skip] + let r = i64x2::new(2, 3); - assert_eq!(r, __msa_sldi_d(a, b, 2)); + assert_eq!( + r, + ::mem::transmute(__msa_sldi_d(::mem::transmute(a), ::mem::transmute(b), 2)) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sll_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 16, 16, 12, 8, 16, 16, 12, 8, 16, 16, 12, 8, 16, 16, 12, 8 ); - assert_eq!(r, __msa_sll_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sll_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sll_h() { #[rustfmt::skip] - let a = i16x8(1, 2, 3, 4, 1, 2, 3, 4); + let a = i16x8::new(1, 2, 3, 4, 1, 2, 3, 4); #[rustfmt::skip] - let b = i16x8(4, 3, 2, 1, 4, 3, 2, 1); - let r = i16x8(16, 16, 12, 8, 16, 16, 12, 8); + let b = i16x8::new(4, 3, 2, 1, 4, 3, 2, 1); + #[rustfmt::skip] + let r = i16x8::new(16, 16, 12, 8, 16, 16, 12, 8); - assert_eq!(r, __msa_sll_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sll_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sll_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); + let a = i32x4::new(1, 2, 3, 4); #[rustfmt::skip] - let b = i32x4(4, 3, 2, 1); - let r = i32x4(16, 16, 12, 8); + let b = i32x4::new(4, 3, 2, 1); + #[rustfmt::skip] + let r = i32x4::new(16, 16, 12, 8); - assert_eq!(r, __msa_sll_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sll_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sll_d() { #[rustfmt::skip] - let a = i64x2(1, 2); + let a = i64x2::new(1, 2); #[rustfmt::skip] - let b = i64x2(4, 3); - let r = i64x2(16, 16); + let b = i64x2::new(4, 3); + #[rustfmt::skip] + let r = i64x2::new(16, 16); - assert_eq!(r, __msa_sll_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sll_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_slli_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 4, 8, 12, 16, 4, 8, 12, 16, 4, 8, 12, 16, 4, 8, 12, 16 ); - assert_eq!(r, __msa_slli_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_slli_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_slli_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = i16x8(4, 8, 12, 16, 4, 8, 12, 16); + #[rustfmt::skip] + let r = i16x8::new(4, 8, 12, 16, 4, 8, 12, 16); - assert_eq!(r, __msa_slli_h(a, 2)); + assert_eq!(r, 
::mem::transmute(__msa_slli_h(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_slli_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); - let r = i32x4(4, 8, 12, 16); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = i32x4::new(4, 8, 12, 16); - assert_eq!(r, __msa_slli_w(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_slli_w(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_slli_d() { #[rustfmt::skip] - let a = i64x2(1, 2); - let r = i64x2(2, 4); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let r = i64x2::new(2, 4); - assert_eq!(r, __msa_slli_d(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_slli_d(::mem::transmute(a), 1))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splat_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ); - assert_eq!(r, __msa_splat_b(a, 3)); + assert_eq!(r, ::mem::transmute(__msa_splat_b(::mem::transmute(a), 3))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splat_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 1, 2, 3, 4, ); + #[rustfmt::skip] + let r = i16x8::new(4, 4, 4, 4, 4, 4, 4, 4); - let r = i16x8(4, 4, 4, 4, 4, 4, 4, 4); - - assert_eq!(r, __msa_splat_h(a, 3)); + assert_eq!(r, ::mem::transmute(__msa_splat_h(::mem::transmute(a), 3))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splat_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); - - let r = i32x4(4, 4, 4, 4); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = i32x4::new(4, 4, 4, 4); - assert_eq!(r, __msa_splat_w(a, 3)); + assert_eq!(r, ::mem::transmute(__msa_splat_w(::mem::transmute(a), 3))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splat_d() { #[rustfmt::skip] - let a = i64x2(1, 2); - - let r = i64x2(2, 2); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let r = i64x2::new(2, 2); - assert_eq!(r, __msa_splat_d(a, 3)); + assert_eq!(r, ::mem::transmute(__msa_splat_d(::mem::transmute(a), 3))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splati_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 ); - assert_eq!(r, __msa_splati_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_splati_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splati_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( 1, 2, 3, 4, 1, 2, 3, 4, ); - let r = i16x8(3, 3, 3, 3, 3, 3, 3, 3); + #[rustfmt::skip] + let r = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3); - assert_eq!(r, __msa_splati_h(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_splati_h(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splati_w() { #[rustfmt::skip] - let a = i32x4(1, 2, 3, 4); - let r = i32x4(3, 3, 3, 3); + let a = i32x4::new(1, 2, 3, 4); + #[rustfmt::skip] + let r = i32x4::new(3, 3, 3, 3); - assert_eq!(r, __msa_splati_w(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_splati_w(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_splati_d() { #[rustfmt::skip] - let a = i64x2(1, 2); - let r = i64x2(2, 2); + let a = i64x2::new(1, 2); + #[rustfmt::skip] + let r = i64x2::new(2, 2); - assert_eq!(r, __msa_splati_d(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_splati_d(::mem::transmute(a), 1))); } #[simd_test(enable = "msa")] 
unsafe fn test_msa_sra_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, -64, -32, -16, -8, -4, -2, -1, 1, 2, 4, 8, 16, 32, 64, 127 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -128, -1, -1, -1, -1, -1, -1, -1, 1, 0, 0, 0, 1, 4, 16, 63 ); - assert_eq!(r, __msa_sra_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sra_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sra_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( -32768, -16384, -8192, -4096, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 15, 14, 13, 12, 12, 13, 14, 15 ); - let r = i16x8( + #[rustfmt::skip] + let r = i16x8::new( -1, -1, -1, -1, 0, 0, 0, 0 ); - assert_eq!(r, __msa_sra_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sra_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sra_w() { #[rustfmt::skip] - let a = i32x4(i32::min_value(), -1073741824, 1, 2); + let a = i32x4::new(i32::min_value(), -1073741824, 1, 2); #[rustfmt::skip] - let b = i32x4(16, 15, 16, 15); - let r = i32x4(-32768, -32768, 0, 0); + let b = i32x4::new(16, 15, 16, 15); + #[rustfmt::skip] + let r = i32x4::new(-32768, -32768, 0, 0); - assert_eq!(r, __msa_sra_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sra_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_sra_d() { #[rustfmt::skip] - let a = i64x2(i64::min_value(), i64::max_value()); + let a = i64x2::new(i64::min_value(), i64::max_value()); #[rustfmt::skip] - let b = i64x2(32, 31); - let r = i64x2(-2147483648, 4294967295); + let b = i64x2::new(32, 31); + #[rustfmt::skip] + let r = i64x2::new(-2147483648, 4294967295); - assert_eq!(r, __msa_sra_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_sra_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srai_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( i8::max_value(), 125, 55, 1, i8::max_value(), 125, 55, 1, i8::max_value(), 125, 55, 1, i8::max_value(), 125, 55, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 31, 31, 13, 0, 31, 31, 13, 0, 31, 31, 13, 0, 31, 31, 13, 0 ); - assert_eq!(r, __msa_srai_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srai_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srai_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 125, 55, 1, i16::max_value(), 125, 55, 1 ); - let r = i16x8(8191, 31, 13, 0, 8191, 31, 13, 0); + #[rustfmt::skip] + let r = i16x8::new(8191, 31, 13, 0, 8191, 31, 13, 0); - assert_eq!(r, __msa_srai_h(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srai_h(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srai_w() { #[rustfmt::skip] - let a = i32x4(i32::max_value(), 125, 55, 1); - let r = i32x4(536870911, 31, 13, 0); + let a = i32x4::new(i32::max_value(), 125, 55, 1); + #[rustfmt::skip] + let r = i32x4::new(536870911, 31, 13, 0); - assert_eq!(r, __msa_srai_w(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srai_w(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srai_d() { #[rustfmt::skip] - let a = i64x2(i64::max_value(), 55); - let r = i64x2(2305843009213693951, 13); + let a = i64x2::new(i64::max_value(), 55); + #[rustfmt::skip] + let r = i64x2::new(2305843009213693951, 13); - assert_eq!(r, __msa_srai_d(a, 
2)); + assert_eq!(r, ::mem::transmute(__msa_srai_d(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srar_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, -64, -32, -16, -8, -4, -2, -1, 1, 2, 4, 8, 16, 32, 64, 127 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 4, 3, 2, 1, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -8, -8, -8, -8, 0, 0, 0, 0, 1, 0, 0, 0, 1, 4, 16, 64 ); - assert_eq!(r, __msa_srar_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srar_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srar_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::min_value(), -16384, -8192, -4096, 150, 50, 25, 15 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 4, 3, 2, 1, 1, 2, 3, 4 ); - let r = i16x8(-2048, -2048, -2048, -2048, 75, 13, 3, 1); + #[rustfmt::skip] + let r = i16x8::new( + -2048, -2048, -2048, -2048, + 75, 13, 3, 1 + ); - assert_eq!(r, __msa_srar_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srar_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srar_w() { #[rustfmt::skip] - let a = i32x4(i32::min_value(), -1073741824, 100, 50); + let a = i32x4::new(i32::min_value(), -1073741824, 100, 50); #[rustfmt::skip] - let b = i32x4(16, 15, 1, 2); - let r = i32x4(-32768, -32768, 50, 13); + let b = i32x4::new(16, 15, 1, 2); + #[rustfmt::skip] + let r = i32x4::new(-32768, -32768, 50, 13); - assert_eq!(r, __msa_srar_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srar_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srar_d() { #[rustfmt::skip] - let a = i64x2(i64::min_value(), i64::max_value()); + let a = i64x2::new(i64::min_value(), i64::max_value()); #[rustfmt::skip] - let b = i64x2(32, 31); - let r = i64x2(-2147483648, 4294967296); + let b = i64x2::new(32, 31); + #[rustfmt::skip] + let r = i64x2::new(-2147483648, 4294967296); - assert_eq!(r, __msa_srar_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srar_d(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srari_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 125, i8::max_value(), 55, 1, 125, i8::max_value(), 55, 1, 125, i8::max_value(), 55, 1, 125, i8::max_value(), 55, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 31, 32, 14, 0, 31, 32, 14, 0, 31, 32, 14, 0, 31, 32, 14, 0 ); - assert_eq!(r, __msa_srari_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srari_b(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srari_h() { #[rustfmt::skip] - let a = i16x8(2155, 1155, 155, 1, 2155, 1155, 155, 1); - let r = i16x8(539, 289, 39, 0, 539, 289, 39, 0); + let a = i16x8::new(2155, 1155, 155, 1, 2155, 1155, 155, 1); + #[rustfmt::skip] + let r = i16x8::new(539, 289, 39, 0, 539, 289, 39, 0); - assert_eq!(r, __msa_srari_h(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srari_h(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srari_w() { #[rustfmt::skip] - let a = i32x4(211111155, 111111155, 11111155, 1); - let r = i32x4(52777789, 27777789, 2777789, 0); + let a = i32x4::new(211111155, 111111155, 11111155, 1); + #[rustfmt::skip] + let r = i32x4::new(52777789, 27777789, 2777789, 0); - assert_eq!(r, __msa_srari_w(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srari_w(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe 
fn test_msa_srari_d() { #[rustfmt::skip] - let a = i64x2(211111111155, 111111111155); - let r = i64x2(52777777789, 27777777789); + let a = i64x2::new(211111111155, 111111111155); + #[rustfmt::skip] + let r = i64x2::new(52777777789, 27777777789); - assert_eq!(r, __msa_srari_d(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srari_d(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srl_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, -64, -32, -16, -8, -4, -2, -1, 1, 2, 4, 8, 16, 32, 64, 127 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -128, 1, 3, 7, 15, 31, 63, 127, 1, 0, 0, 0, 1, 4, 16, 63 ); - assert_eq!(r, __msa_srl_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srl_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srl_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( -32768, -16384, -8192, -4096, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 15, 14, 13, 12, 4, 3, 2, 1 ); - let r = i16x8(1, 3, 7, 15, 0, 0, 0, 2); + #[rustfmt::skip] + let r = i16x8::new(1, 3, 7, 15, 0, 0, 0, 2); - assert_eq!(r, __msa_srl_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srl_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srl_w() { #[rustfmt::skip] - let a = i32x4(i32::min_value(), -1073741824, 1, 2); + let a = i32x4::new(i32::min_value(), -1073741824, 1, 2); #[rustfmt::skip] - let b = i32x4(16, 15, 16, 15); - let r = i32x4(32768, 98304, 0, 0); + let b = i32x4::new(16, 15, 16, 15); + #[rustfmt::skip] + let r = i32x4::new(32768, 98304, 0, 0); - assert_eq!(r, __msa_srl_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srl_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srl_d() { #[rustfmt::skip] - let a = i64x2(i64::min_value(), i64::max_value()); + let a = i64x2::new(i64::min_value(), i64::max_value()); #[rustfmt::skip] - let b = i64x2(32, 31); - let r = i64x2(2147483648, 4294967295); + let b = i64x2::new(32, 31); + #[rustfmt::skip] + let r = i64x2::new(2147483648, 4294967295); - assert_eq!(r, __msa_srl_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srl_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_srli_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 25, 50, 100, 127, 25, 50, 100, 127, 25, 50, 100, 127, 25, 50, 100, 127 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 6, 12, 25, 31, 6, 12, 25, 31, 6, 12, 25, 31, 6, 12, 25, 31 ); - assert_eq!(r, __msa_srli_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srli_b(::mem::transmute(a), 2))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_srli_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 3276, 100, 127, i16::max_value(), 3276, 100, 127 ); - let r = i16x8( + #[rustfmt::skip] + let r = i16x8::new( 8191, 819, 25, 31, 8191, 819, 25, 31 ); - assert_eq!(r, __msa_srli_h(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srli_h(::mem::transmute(a), 2))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_srli_w() { #[rustfmt::skip] - let a = i32x4(100, i32::max_value(), 100, i32::max_value()); - let r = i32x4(25, 536870911, 25, 536870911); + let a = i32x4::new(100, i32::max_value(), 100, 
i32::max_value()); + #[rustfmt::skip] + let r = i32x4::new(25, 536870911, 25, 536870911); - assert_eq!(r, __msa_srli_w(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srli_w(::mem::transmute(a), 2))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srli_d() { #[rustfmt::skip] - let a = i64x2(100, i64::max_value()); + let a = i64x2::new(100, i64::max_value()); #[rustfmt::skip] - let r = i64x2(50, 4611686018427387903); + let r = i64x2::new(50, 4611686018427387903); - assert_eq!(r, __msa_srli_d(a, 1)); + assert_eq!(r, ::mem::transmute(__msa_srli_d(::mem::transmute(a), 1))); } #[simd_test(enable = "msa")] unsafe fn test_msa_srlr_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( -128, -64, -32, -16, -8, -4, -2, -1, 1, 2, 4, 8, 16, 32, 64, 127 ); #[rustfmt::skip] - let b = i8x16( + let b = i8x16::new( 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( -128, 2, 4, 8, 16, 32, 64, -128, 1, 0, 0, 0, 1, 4, 16, 64 ); - assert_eq!(r, __msa_srlr_b(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srlr_b(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srlr_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( -32768, -16384, -8192, -4096, 1, 2, 3, 4 ); #[rustfmt::skip] - let b = i16x8( + let b = i16x8::new( 15, 14, 13, 12, 4, 3, 2, 1 ); - let r = i16x8(1, 3, 7, 15, 0, 0, 1, 2); + #[rustfmt::skip] + let r = i16x8::new(1, 3, 7, 15, 0, 0, 1, 2); - assert_eq!(r, __msa_srlr_h(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srlr_h(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srlr_w() { #[rustfmt::skip] - let a = i32x4(i32::min_value(), -1073741824, 1, 2); + let a = i32x4::new(i32::min_value(), -1073741824, 1, 2); #[rustfmt::skip] - let b = i32x4(16, 15, 16, 15); - let r = i32x4(32768, 98304, 0, 0); + let b = i32x4::new(16, 15, 16, 15); + #[rustfmt::skip] + let r = i32x4::new(32768, 98304, 0, 0); - assert_eq!(r, __msa_srlr_w(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srlr_w(::mem::transmute(a), ::mem::transmute(b))) + ); } #[simd_test(enable = "msa")] unsafe fn test_msa_srlr_d() { #[rustfmt::skip] - let a = i64x2(i64::min_value(), i64::max_value()); + let a = i64x2::new(i64::min_value(), i64::max_value()); + #[rustfmt::skip] + let b = i64x2::new(32, 31); #[rustfmt::skip] - let b = i64x2(32, 31); - let r = i64x2(2147483648, 4294967296); + let r = i64x2::new(2147483648, 4294967296); - assert_eq!(r, __msa_srlr_d(a, b)); + assert_eq!( + r, + ::mem::transmute(__msa_srlr_d(::mem::transmute(a), ::mem::transmute(b))) + ); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_srlri_b() { #[rustfmt::skip] - let a = i8x16( + let a = i8x16::new( 25, 50, 100, i8::max_value(), 25, 50, 100, i8::max_value(), 25, 50, 100, i8::max_value(), 25, 50, 100, i8::max_value() ); - let r = i8x16( + #[rustfmt::skip] + let r = i8x16::new( 6, 13, 25, 32, 6, 13, 25, 32, 6, 13, 25, 32, 6, 13, 25, 32 ); - assert_eq!(r, __msa_srlri_b(a, 2)); + assert_eq!(r, ::mem::transmute(__msa_srlri_b(::mem::transmute(a), 2))); } - #[simd_test(enable = "msa")] + #[simd_test(enable = "msa")] unsafe fn test_msa_srlri_h() { #[rustfmt::skip] - let a = i16x8( + let a = i16x8::new( i16::max_value(), 3276, 100, 127, i16::max_value(), 3276, 100, 127 ); - let r = i16x8(8192, 819, 25, 32, 8192, 819, 25, 32); + #[rustfmt::skip] + let r = i16x8::new(8192, 819, 25, 32, 8192, 819, 25, 32); - assert_eq!(r, __msa_srlri_h(a, 2)); + assert_eq!(r, 

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_srlri_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             25, 50, 100, i8::max_value(),
             25, 50, 100, i8::max_value(),
             25, 50, 100, i8::max_value(),
             25, 50, 100, i8::max_value()
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             6, 13, 25, 32,
             6, 13, 25, 32,
             6, 13, 25, 32,
             6, 13, 25, 32
         );

-        assert_eq!(r, __msa_srlri_b(a, 2));
+        assert_eq!(r, ::mem::transmute(__msa_srlri_b(::mem::transmute(a), 2)));
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_srlri_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             i16::max_value(), 3276, 100, 127,
             i16::max_value(), 3276, 100, 127
         );
-        let r = i16x8(8192, 819, 25, 32, 8192, 819, 25, 32);
+        let r = i16x8::new(8192, 819, 25, 32, 8192, 819, 25, 32);

-        assert_eq!(r, __msa_srlri_h(a, 2));
+        assert_eq!(r, ::mem::transmute(__msa_srlri_h(::mem::transmute(a), 2)));
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_srlri_w() {
         #[rustfmt::skip]
-        let a = i32x4(100, 150, 200, i32::max_value());
-        let r = i32x4(25, 38, 50, 536870912);
+        let a = i32x4::new(100, 150, 200, i32::max_value());
+        #[rustfmt::skip]
+        let r = i32x4::new(25, 38, 50, 536870912);

-        assert_eq!(r, __msa_srlri_w(a, 2));
+        assert_eq!(r, ::mem::transmute(__msa_srlri_w(::mem::transmute(a), 2)));
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_srlri_d() {
         #[rustfmt::skip]
-        let a = i64x2(100, i64::max_value());
+        let a = i64x2::new(100, i64::max_value());
         #[rustfmt::skip]
-        let r = i64x2(50, 4611686018427387904);
+        let r = i64x2::new(50, 4611686018427387904);

-        assert_eq!(r, __msa_srlri_d(a, 1));
+        assert_eq!(r, ::mem::transmute(__msa_srlri_d(::mem::transmute(a), 1)));
     }
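
The srlr/srlri expected values differ from plain srl/srli by a rounding step: the most significant bit shifted out is added back. A scalar sketch of that rule (mine, not the intrinsic; srari applies the same rounding on top of an arithmetic shift):

    // Rounding logical shift right: shift, then add the last bit shifted out.
    fn srlr_lane(a: u16, b: u32) -> u16 {
        if b == 0 {
            return a;
        }
        (a >> b) + ((a >> (b - 1)) & 1)
    }

    fn main() {
        assert_eq!(srlr_lane(3, 2), 1); // 0.75 rounds up to 1 (srlr.h lane above)
        assert_eq!(srlr_lane(127, 2), 32); // matches the srlri.b lanes
    }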

-
-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_st_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             13, 14, 15, 16,
             17, 18, 19, 20,
             21, 22, 23, 24,
             25, 26, 27, 28
         );
+        #[rustfmt::skip]
         let mut arr : [i8; 16] = [
             0, 0, 0, 0,
             0, 0, 0, 0,
@@ -16154,532 +17814,651 @@ mod tests {
             21, 22, 23, 24,
             25, 26, 27, 28
         ];
-        __msa_st_b(a, arr.as_mut_ptr(), 0);
+        __msa_st_b(::mem::transmute(a), arr.as_mut_ptr(), 0);
         assert_eq!(arr, r);
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_st_h() {
         #[rustfmt::skip]
-        let a = i16x8(13, 14, 15, 16, 17, 18, 19, 20);
-        let mut arr : [i16; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
+        let a = i16x8::new(13, 14, 15, 16, 17, 18, 19, 20);
+        let mut arr: [i16; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
         #[rustfmt::skip]
         let r : [i16; 8] = [13, 14, 15, 16, 17, 18, 19, 20];
-        __msa_st_h(a, arr.as_mut_ptr() as *mut i8, 0);
+        __msa_st_h(::mem::transmute(a), arr.as_mut_ptr() as *mut i8, 0);
         assert_eq!(arr, r);
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_st_w() {
         #[rustfmt::skip]
-        let a = i32x4(13, 14, 15, 16);
-        let mut arr : [i32; 4] = [0, 0, 0, 0];
+        let a = i32x4::new(13, 14, 15, 16);
+        let mut arr: [i32; 4] = [0, 0, 0, 0];
         #[rustfmt::skip]
         let r : [i32; 4] = [13, 14, 15, 16];
-        __msa_st_w(a, arr.as_mut_ptr() as *mut i8, 0);
+        __msa_st_w(::mem::transmute(a), arr.as_mut_ptr() as *mut i8, 0);
         assert_eq!(arr, r);
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_st_d() {
         #[rustfmt::skip]
-        let a = i64x2(13, 14);
-        let mut arr : [i64; 2] = [0, 0];
+        let a = i64x2::new(13, 14);
+        let mut arr: [i64; 2] = [0, 0];
         #[rustfmt::skip]
         let r : [i64; 2] = [13, 14];
-        __msa_st_d(a, arr.as_mut_ptr() as *mut i8, 0);
+        __msa_st_d(::mem::transmute(a), arr.as_mut_ptr() as *mut i8, 0);
         assert_eq!(arr, r);
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_s_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             i8::min_value(), -2, -3, -4,
             i8::min_value(), -2, -3, -4,
             i8::min_value(), -2, -3, -4,
             i8::min_value(), -2, -3, -4
         );
         #[rustfmt::skip]
-        let b = i8x16(
+        let b = i8x16::new(
             6, -7, 8, -9,
             6, -7, 8, -9,
             6, -7, 8, -9,
             6, -7, 8, -9
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             i8::min_value(), 5, -11, 5,
             i8::min_value(), 5, -11, 5,
             i8::min_value(), 5, -11, 5,
             i8::min_value(), 5, -11, 5
         );

-        assert_eq!(r, __msa_subs_s_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_s_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_s_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             i16::min_value(), -2, -3, -4,
             i16::min_value(), -2, -3, -4
         );
         #[rustfmt::skip]
-        let b = i16x8(6, -7, 8, -9, 6, -7, 8, -9);
-        let r = i16x8(
+        let b = i16x8::new(6, -7, 8, -9, 6, -7, 8, -9);
+        #[rustfmt::skip]
+        let r = i16x8::new(
             i16::min_value(), 5, -11, 5,
             i16::min_value(), 5, -11, 5
         );

-        assert_eq!(r, __msa_subs_s_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_s_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_s_w() {
         #[rustfmt::skip]
-        let a = i32x4(i32::min_value(), -2, -3, -4);
+        let a = i32x4::new(i32::min_value(), -2, -3, -4);
         #[rustfmt::skip]
-        let b = i32x4(6, -7, 8, -9);
-        let r = i32x4(i32::min_value(), 5, -11, 5);
+        let b = i32x4::new(6, -7, 8, -9);
+        #[rustfmt::skip]
+        let r = i32x4::new(i32::min_value(), 5, -11, 5);

-        assert_eq!(r, __msa_subs_s_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_s_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_s_d() {
         #[rustfmt::skip]
-        let a = i64x2(i64::min_value(), -2);
+        let a = i64x2::new(i64::min_value(), -2);
         #[rustfmt::skip]
-        let b = i64x2(6, -7);
-        let r = i64x2(i64::min_value(), 5);
+        let b = i64x2::new(6, -7);
+        #[rustfmt::skip]
+        let r = i64x2::new(i64::min_value(), 5);

-        assert_eq!(r, __msa_subs_s_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_s_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_u_b() {
         #[rustfmt::skip]
-        let a = u8x16(
+        let a = u8x16::new(
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = u8x16(
+        let b = u8x16::new(
             6, 7, 8, 9,
             6, 7, 8, 9,
             6, 7, 8, 9,
             6, 7, 8, 9
         );
-        let r = u8x16(
+        #[rustfmt::skip]
+        let r = u8x16::new(
             249, 0, 0, 0,
             249, 0, 0, 0,
             249, 0, 0, 0,
             249, 0, 0, 0
         );

-        assert_eq!(r, __msa_subs_u_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_u_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_u_h() {
         #[rustfmt::skip]
-        let a = u16x8(
+        let a = u16x8::new(
             u16::max_value(), 2, 3, 4,
             u16::max_value(), 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = u16x8(6, 7, 8, 9, 6, 7, 8, 9);
-        let r = u16x8(65529, 0, 0, 0, 65529, 0, 0, 0);
+        let b = u16x8::new(6, 7, 8, 9, 6, 7, 8, 9);
+        #[rustfmt::skip]
+        let r = u16x8::new(65529, 0, 0, 0, 65529, 0, 0, 0);

-        assert_eq!(r, __msa_subs_u_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_u_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_u_w() {
         #[rustfmt::skip]
-        let a = u32x4(u32::max_value(), 2, 3, 4);
+        let a = u32x4::new(u32::max_value(), 2, 3, 4);
         #[rustfmt::skip]
-        let b = u32x4(6, 7, 8, 9);
-        let r = u32x4(4294967289, 0, 0, 0);
+        let b = u32x4::new(6, 7, 8, 9);
+        #[rustfmt::skip]
+        let r = u32x4::new(4294967289, 0, 0, 0);

-        assert_eq!(r, __msa_subs_u_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_u_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subs_u_d() {
         #[rustfmt::skip]
-        let a = u64x2(u64::max_value(), 2);
+        let a = u64x2::new(u64::max_value(), 2);
         #[rustfmt::skip]
-        let b = u64x2(6, 7);
-        let r = u64x2(18446744073709551609, 0);
+        let b = u64x2::new(6, 7);
+        #[rustfmt::skip]
+        let r = u64x2::new(18446744073709551609, 0);

-        assert_eq!(r, __msa_subs_u_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subs_u_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }
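
The subs_s/subs_u families are ordinary lane-wise saturating subtraction, which scalar Rust expresses directly. For instance (my sketch, not part of the patch):

    fn main() {
        // subs_s.b: i8::MIN - 6 saturates at i8::MIN.
        assert_eq!(i8::min_value().saturating_sub(6), i8::min_value());
        // subs_u.b: 2 - 7 saturates at 0; 255 - 6 = 249.
        assert_eq!(2u8.saturating_sub(7), 0u8);
        assert_eq!(u8::max_value().saturating_sub(6), 249u8);
    }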

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsus_u_b() {
         #[rustfmt::skip]
-        let a = u8x16(
+        let a = u8x16::new(
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = i8x16(
+        let b = i8x16::new(
             -6, -7, -8, -9,
             -6, -7, -8, -9,
             -6, -7, -8, -9,
             -6, -7, -8, -9
         );
-        let r = u8x16(
+        #[rustfmt::skip]
+        let r = u8x16::new(
             255, 9, 11, 13,
             255, 9, 11, 13,
             255, 9, 11, 13,
             255, 9, 11, 13
         );

-        assert_eq!(r, __msa_subsus_u_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsus_u_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsus_u_h() {
         #[rustfmt::skip]
-        let a = u16x8(
+        let a = u16x8::new(
             u16::max_value(), 2, 3, 4,
             u16::max_value(), 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = i16x8(-6, -7, -8, -9, -6, -7, -8, -9);
-        let r = u16x8(65535, 9, 11, 13, 65535, 9, 11, 13);
+        let b = i16x8::new(-6, -7, -8, -9, -6, -7, -8, -9);
+        #[rustfmt::skip]
+        let r = u16x8::new(65535, 9, 11, 13, 65535, 9, 11, 13);

-        assert_eq!(r, __msa_subsus_u_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsus_u_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsus_u_w() {
         #[rustfmt::skip]
-        let a = u32x4(u32::max_value(), 2, 3, 4);
+        let a = u32x4::new(u32::max_value(), 2, 3, 4);
         #[rustfmt::skip]
-        let b = i32x4(-6, -7, -8, -9);
-        let r = u32x4(4294967295, 9, 11, 13);
+        let b = i32x4::new(-6, -7, -8, -9);
+        #[rustfmt::skip]
+        let r = u32x4::new(4294967295, 9, 11, 13);

-        assert_eq!(r, __msa_subsus_u_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsus_u_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsus_u_d() {
         #[rustfmt::skip]
-        let a = u64x2(u64::max_value(), 2);
+        let a = u64x2::new(u64::max_value(), 2);
         #[rustfmt::skip]
-        let b = i64x2(-6, -7);
-        let r = u64x2(18446744073709551615, 9);
+        let b = i64x2::new(-6, -7);
+        #[rustfmt::skip]
+        let r = u64x2::new(18446744073709551615, 9);

-        assert_eq!(r, __msa_subsus_u_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsus_u_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsuu_s_b() {
         #[rustfmt::skip]
-        let a = u8x16(
+        let a = u8x16::new(
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4,
             u8::max_value(), 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = u8x16(
+        let b = u8x16::new(
             6, 7, 8, u8::max_value(),
             6, 7, 8, u8::max_value(),
             6, 7, 8, u8::max_value(),
             6, 7, 8, u8::max_value()
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             127, -5, -5, -128,
             127, -5, -5, -128,
             127, -5, -5, -128,
             127, -5, -5, -128
         );

-        assert_eq!(r, __msa_subsuu_s_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsuu_s_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsuu_s_h() {
         #[rustfmt::skip]
-        let a = u16x8(
+        let a = u16x8::new(
             u16::max_value(), 2, 3, 4,
             u16::max_value(), 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = u16x8(6, 7, 8, 65535, 6, 7, 8, 65535);
-        let r = i16x8(32767, -5, -5, -32768, 32767, -5, -5, -32768);
+        let b = u16x8::new(6, 7, 8, 65535, 6, 7, 8, 65535);
+        #[rustfmt::skip]
+        let r = i16x8::new(32767, -5, -5, -32768, 32767, -5, -5, -32768);

-        assert_eq!(r, __msa_subsuu_s_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsuu_s_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsuu_s_w() {
         #[rustfmt::skip]
-        let a = u32x4(u32::max_value(), 2, 3, 4);
+        let a = u32x4::new(u32::max_value(), 2, 3, 4);
         #[rustfmt::skip]
-        let b = u32x4(6, 7, 8, 4294967295);
-        let r = i32x4(2147483647, -5, -5, -2147483648);
+        let b = u32x4::new(6, 7, 8, 4294967295);
+        #[rustfmt::skip]
+        let r = i32x4::new(2147483647, -5, -5, -2147483648);

-        assert_eq!(r, __msa_subsuu_s_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsuu_s_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subsuu_s_d() {
         #[rustfmt::skip]
-        let a = u64x2(u64::max_value(), 2);
+        let a = u64x2::new(u64::max_value(), 2);
         #[rustfmt::skip]
-        let b = u64x2(6, 7);
-        let r = i64x2(i64::max_value(), -5);
+        let b = u64x2::new(6, 7);
+        #[rustfmt::skip]
+        let r = i64x2::new(i64::max_value(), -5);

-        assert_eq!(r, __msa_subsuu_s_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subsuu_s_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }
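
subsus_u and subsuu_s mix signedness: the first subtracts a signed vector from an unsigned one and clamps to the unsigned range, the second subtracts unsigned from unsigned and clamps to the signed range. A scalar sketch of both, assuming the documented MSA semantics (mine, not the intrinsics):

    // Widen to i16, subtract, then clamp to the destination lane's range.
    fn subsus_u_lane(a: u8, b: i8) -> u8 {
        let diff = a as i16 - b as i16;
        diff.max(0).min(u8::max_value() as i16) as u8
    }

    fn subsuu_s_lane(a: u8, b: u8) -> i8 {
        let diff = a as i16 - b as i16;
        diff.max(i8::min_value() as i16)
            .min(i8::max_value() as i16) as i8
    }

    fn main() {
        assert_eq!(subsus_u_lane(255, -6), 255); // 261 clamps to u8::MAX
        assert_eq!(subsus_u_lane(2, -7), 9);
        assert_eq!(subsuu_s_lane(255, 6), 127); // 249 clamps to i8::MAX
        assert_eq!(subsuu_s_lane(4, 255), -128); // -251 clamps to i8::MIN
    }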

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subv_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             i8::min_value(), -2, -3, -4,
             i8::min_value(), -2, -3, -4,
             i8::min_value(), -2, -3, -4,
             i8::min_value(), -2, -3, -4
         );
         #[rustfmt::skip]
-        let b = i8x16(
+        let b = i8x16::new(
             6, -7, 8, -9,
             6, -7, 8, -9,
             6, -7, 8, -9,
             6, -7, 8, -9
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             122, 5, -11, 5,
             122, 5, -11, 5,
             122, 5, -11, 5,
             122, 5, -11, 5
         );

-        assert_eq!(r, __msa_subv_b(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subv_b(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subv_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             i16::min_value(), -2, -3, -4,
             i16::min_value(), -2, -3, -4
         );
         #[rustfmt::skip]
-        let b = i16x8(6, -7, 8, -9, 6, -7, 8, -9);
-        let r = i16x8(32762, 5, -11, 5, 32762, 5, -11, 5);
+        let b = i16x8::new(6, -7, 8, -9, 6, -7, 8, -9);
+        #[rustfmt::skip]
+        let r = i16x8::new(32762, 5, -11, 5, 32762, 5, -11, 5);

-        assert_eq!(r, __msa_subv_h(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subv_h(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subv_w() {
         #[rustfmt::skip]
-        let a = i32x4(i32::min_value(), -2, -3, -4);
+        let a = i32x4::new(i32::min_value(), -2, -3, -4);
         #[rustfmt::skip]
-        let b = i32x4(6, -7, 8, -9);
-        let r = i32x4(2147483642, 5, -11, 5);
+        let b = i32x4::new(6, -7, 8, -9);
+        #[rustfmt::skip]
+        let r = i32x4::new(2147483642, 5, -11, 5);

-        assert_eq!(r, __msa_subv_w(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subv_w(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subv_d() {
         #[rustfmt::skip]
-        let a = i64x2(i64::max_value(), -2);
+        let a = i64x2::new(i64::max_value(), -2);
         #[rustfmt::skip]
-        let b = i64x2(6, -7);
-        let r = i64x2(9223372036854775801, 5);
+        let b = i64x2::new(6, -7);
+        #[rustfmt::skip]
+        let r = i64x2::new(9223372036854775801, 5);

-        assert_eq!(r, __msa_subv_d(a, b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_subv_d(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_subvi_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             100, i8::max_value(), 50, i8::min_value(),
             100, i8::max_value(), 50, i8::min_value(),
             100, i8::max_value(), 50, i8::min_value(),
             100, i8::max_value(), 50, i8::min_value()
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             95, 122, 45, 123,
             95, 122, 45, 123,
             95, 122, 45, 123,
             95, 122, 45, 123
         );

-        assert_eq!(r, __msa_subvi_b(a, 5));
+        assert_eq!(r, ::mem::transmute(__msa_subvi_b(::mem::transmute(a), 5)));
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_subvi_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             i16::max_value(), 3276, -100, i16::min_value(),
             i16::max_value(), 3276, -100, i16::min_value()
         );
-        let r = i16x8(
+        #[rustfmt::skip]
+        let r = i16x8::new(
             32762, 3271, -105, 32763,
             32762, 3271, -105, 32763
         );

-        assert_eq!(r, __msa_subvi_h(a, 5));
+        assert_eq!(r, ::mem::transmute(__msa_subvi_h(::mem::transmute(a), 5)));
     }

-    #[simd_test(enable = "msa")]
+    #[simd_test(enable = "msa")]
     unsafe fn test_msa_subvi_w() {
         #[rustfmt::skip]
-        let a = i32x4(100, 150, 200, i32::max_value());
-        let r = i32x4(95, 145, 195, 2147483642);
+        let a = i32x4::new(100, 150, 200, i32::max_value());
+        #[rustfmt::skip]
+        let r = i32x4::new(95, 145, 195, 2147483642);

-        assert_eq!(r, __msa_subvi_w(a, 5));
+        assert_eq!(r, ::mem::transmute(__msa_subvi_w(::mem::transmute(a), 5)));
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_subvi_d() {
         #[rustfmt::skip]
-        let a = i64x2(100, i64::max_value());
+        let a = i64x2::new(100, i64::max_value());
         #[rustfmt::skip]
-        let r = i64x2(95, 9223372036854775802);
+        let r = i64x2::new(95, 9223372036854775802);

-        assert_eq!(r, __msa_subvi_d(a, 5));
+        assert_eq!(r, ::mem::transmute(__msa_subvi_d(::mem::transmute(a), 5)));
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_vshf_b() {
         #[rustfmt::skip]
-        let a = i8x16(
+        let a = i8x16::new(
             1, 2, 3, 4,
             1, 2, 3, 4,
             1, 2, 3, 4,
             1, 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = i8x16(
+        let b = i8x16::new(
             4, 3, 2, 1,
             4, 3, 2, 1,
             4, 3, 2, 1,
             4, 3, 2, 1
         );
         #[rustfmt::skip]
-        let c = i8x16(
+        let c = i8x16::new(
             4, 3, 2, 1,
             4, 3, 2, 1,
             4, 3, 2, 1,
             4, 3, 2, 1
         );
-        let r = i8x16(
+        #[rustfmt::skip]
+        let r = i8x16::new(
             3, 2, 1, 4,
             3, 2, 1, 4,
             3, 2, 1, 4,
             3, 2, 1, 4
         );

-        assert_eq!(r, __msa_vshf_b(a, b, c));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_vshf_b(
+                ::mem::transmute(a),
+                ::mem::transmute(b),
+                ::mem::transmute(c)
+            ))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_vshf_h() {
         #[rustfmt::skip]
-        let a = i16x8(
+        let a = i16x8::new(
             1, 2, 3, 4,
             1, 2, 3, 4
         );
         #[rustfmt::skip]
-        let b = i16x8(
+        let b = i16x8::new(
             4, 3, 2, 1,
             4, 3, 2, 1
         );
         #[rustfmt::skip]
-        let c = i16x8(
+        let c = i16x8::new(
             4, 3, 2, 1,
             4, 3, 2, 1
         );
-        let r = i16x8(3, 2, 1, 4, 3, 2, 1, 4);
+        let r = i16x8::new(3, 2, 1, 4, 3, 2, 1, 4);

-        assert_eq!(r, __msa_vshf_h(a, b, c));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_vshf_h(
+                ::mem::transmute(a),
+                ::mem::transmute(b),
+                ::mem::transmute(c)
+            ))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_vshf_w() {
         #[rustfmt::skip]
-        let a = i32x4(1, 2, 3, 4);
+        let a = i32x4::new(1, 2, 3, 4);
+        #[rustfmt::skip]
+        let b = i32x4::new(4, 3, 2, 1);
         #[rustfmt::skip]
-        let b = i32x4(4, 3, 2, 1);
+        let c = i32x4::new(4, 3, 2, 1);
         #[rustfmt::skip]
-        let c = i32x4(4, 3, 2, 1);
-        let r = i32x4(3, 2, 1, 4);
+        let r = i32x4::new(3, 2, 1, 4);

-        assert_eq!(r, __msa_vshf_w(a, b, c));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_vshf_w(
+                ::mem::transmute(a),
+                ::mem::transmute(b),
+                ::mem::transmute(c)
+            ))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_vshf_d() {
         #[rustfmt::skip]
-        let a = i64x2(1, 2);
+        let a = i64x2::new(1, 2);
+        #[rustfmt::skip]
+        let b = i64x2::new(4, 3);
         #[rustfmt::skip]
-        let b = i64x2(4, 3);
+        let c = i64x2::new(4, 3);
         #[rustfmt::skip]
-        let c = i64x2(4, 3);
-        let r = i64x2(3, 4);
+        let r = i64x2::new(3, 4);

-        assert_eq!(r, __msa_vshf_d(a, b, c));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_vshf_d(
+                ::mem::transmute(a),
+                ::mem::transmute(b),
+                ::mem::transmute(c)
+            ))
+        );
     }
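
The vshf tests read most easily with the selection rule in mind: each lane of the first operand is a control index into the data lanes. The hardware indexes the concatenation of the two data vectors; since b and c are identical in these tests, only the index-modulo-lane-count behaviour is visible. A reduced model of the i32 case (my sketch, covering only the situation the tests exercise):

    // Each control lane picks, by index modulo the element count,
    // a lane from the (here identical) data vectors.
    fn vshf_w_model(a: [i32; 4], bc: [i32; 4]) -> [i32; 4] {
        let mut out = [0; 4];
        for i in 0..4 {
            out[i] = bc[(a[i] as usize) % 4];
        }
        out
    }

    fn main() {
        // Matches test_msa_vshf_w: control [1, 2, 3, 4] over data [4, 3, 2, 1].
        assert_eq!(vshf_w_model([1, 2, 3, 4], [4, 3, 2, 1]), [3, 2, 1, 4]);
    }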

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_xor_v() {
         #[rustfmt::skip]
-        let a = u8x16(
+        let a = u8x16::new(
             1, 2, 3, 4,
             5, 6, 7, 8,
             9, 10, 11, 12,
             13, 14, 15, 16
         );
         #[rustfmt::skip]
-        let b = u8x16(
+        let b = u8x16::new(
             16, 15, 14, 13,
             12, 11, 10, 9,
             8, 7, 6, 5,
             4, 3, 2, 1
         );
-        let r = u8x16(
+        #[rustfmt::skip]
+        let r = u8x16::new(
             17, 13, 13, 9,
             9, 13, 13, 1,
             1, 13, 13, 9,
             9, 13, 13, 17
         );

-        assert_eq!(r, __msa_xor_v(a,b));
+        assert_eq!(
+            r,
+            ::mem::transmute(__msa_xor_v(::mem::transmute(a), ::mem::transmute(b)))
+        );
     }

     #[simd_test(enable = "msa")]
     unsafe fn test_msa_xori_b() {
         #[rustfmt::skip]
-        let a = u8x16(
+        let a = u8x16::new(
             1, 2, 3, 4,
             5, 6, 7, 8,
             9, 10, 11, 12,
             13, 14, 15, 16
         );
-        let r = u8x16(
+        #[rustfmt::skip]
+        let r = u8x16::new(
             5, 6, 7, 0,
             1, 2, 3, 12,
             13, 14, 15, 8,
             9, 10, 11, 20
         );

-        assert_eq!(r, __msa_xori_b(a, 4));
+        assert_eq!(r, ::mem::transmute(__msa_xori_b(::mem::transmute(a), 4)));
     }
 }
diff --git a/crates/core_arch/src/mips/msa/macros.rs b/crates/core_arch/src/mips/msa/macros.rs
index c30583e854..a16a58dc1f 100644
--- a/crates/core_arch/src/mips/msa/macros.rs
+++ b/crates/core_arch/src/mips/msa/macros.rs
@@ -1,5 +1,6 @@
 //! Utility macros.

+// immediate value: -4096:4088
 macro_rules! constify_imm_s13 {
     ($imm_s13:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -28,13 +29,13 @@ macro_rules! constify_imm_s13 {
             168 => $expand!(168),
             176 => $expand!(176),
             184 => $expand!(184),
-            192 => $expand!(192),
-            200 => $expand!(200),
-            208 => $expand!(208),
-            216 => $expand!(216),
-            224 => $expand!(224),
-            232 => $expand!(232),
-            240 => $expand!(240),
+            192 => $expand!(192),
+            200 => $expand!(200),
+            208 => $expand!(208),
+            216 => $expand!(216),
+            224 => $expand!(224),
+            232 => $expand!(232),
+            240 => $expand!(240),
             248 => $expand!(248),
             256 => $expand!(256),
             264 => $expand!(264),
@@ -1027,11 +1028,12 @@ macro_rules! constify_imm_s13 {
             8168 => $expand!(-24),
             8176 => $expand!(-16),
             8184 => $expand!(-8),
-            _ => $expand!(4088)
-        }
-    };
+            _ => $expand!(4088),
+        }
+    };
 }

+// immediate value: -2048:2044
 macro_rules! constify_imm_s12 {
     ($imm_s12:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -2058,11 +2060,12 @@ macro_rules! constify_imm_s12 {
             4084 => $expand!(-12),
             4088 => $expand!(-8),
             4092 => $expand!(-4),
-            _ => $expand!(2044)
-        }
-    };
+            _ => $expand!(2044),
+        }
+    };
 }

+// immediate value: -1024:1022
 macro_rules! constify_imm_s11 {
     ($imm_s11:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -3090,12 +3093,12 @@ macro_rules! constify_imm_s11 {
             2042 => $expand!(-6),
             2044 => $expand!(-4),
             2046 => $expand!(-2),
-            _ => $expand!(1022)
+            _ => $expand!(1022),
         }
-    };
+    };
 }
-
+// immediate value: -512:511
 macro_rules! constify_imm_s10 {
     ($imm_s10:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -3124,13 +3127,13 @@ macro_rules! constify_imm_s10 {
             21 => $expand!(21),
             22 => $expand!(22),
             23 => $expand!(23),
-            24 => $expand!(24),
-            25 => $expand!(25),
-            26 => $expand!(26),
-            27 => $expand!(27),
-            28 => $expand!(28),
-            29 => $expand!(29),
-            30 => $expand!(30),
+            24 => $expand!(24),
+            25 => $expand!(25),
+            26 => $expand!(26),
+            27 => $expand!(27),
+            28 => $expand!(28),
+            29 => $expand!(29),
+            30 => $expand!(30),
             31 => $expand!(31),
             32 => $expand!(32),
             33 => $expand!(33),
@@ -4123,12 +4126,12 @@ macro_rules! constify_imm_s10 {
             1021 => $expand!(-3),
             1022 => $expand!(-2),
             1023 => $expand!(-1),
-            _ => $expand!(511)
+            _ => $expand!(511),
         }
-    };
+    };
 }
-
+// immediate value: 0:63
 macro_rules! constify_imm6 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
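
The comment lines added here document why these enormous tables exist at all: the underlying LLVM intrinsics demand literal immediates, while the public functions accept a runtime integer, so each constify_imm* macro masks its argument into range and expands one match arm per legal value, handing LLVM a genuine constant in every arm. A toy version of the same pattern (my sketch, not the actual macro):

    // Miniature constify: expand a runtime value into one $expand!(literal)
    // arm per legal 2-bit immediate.
    macro_rules! constify_imm2_demo {
        ($imm2:expr, $expand:ident) => {
            match ($imm2) & 0b11 {
                0 => $expand!(0),
                1 => $expand!(1),
                2 => $expand!(2),
                _ => $expand!(3),
            }
        };
    }

    fn shift_by(x: u32, imm: u32) -> u32 {
        // Hypothetical callback: each arm becomes a constant shift.
        macro_rules! call {
            ($imm:expr) => {
                x << $imm
            };
        }
        constify_imm2_demo!(imm, call)
    }

    fn main() {
        assert_eq!(shift_by(1, 3), 8);
    }

The cost is the source-size explosion seen in this file; the benefit is that the immediate reaching the intrinsic is always a compile-time constant.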
@@ -4157,13 +4160,13 @@ macro_rules! constify_imm6 {
             21 => $expand!(21),
             22 => $expand!(22),
             23 => $expand!(23),
-            24 => $expand!(24),
-            25 => $expand!(25),
-            26 => $expand!(26),
-            27 => $expand!(27),
-            28 => $expand!(28),
-            29 => $expand!(29),
-            30 => $expand!(30),
+            24 => $expand!(24),
+            25 => $expand!(25),
+            26 => $expand!(26),
+            27 => $expand!(27),
+            28 => $expand!(28),
+            29 => $expand!(29),
+            30 => $expand!(30),
             31 => $expand!(31),
             32 => $expand!(32),
             33 => $expand!(33),
@@ -4197,11 +4200,11 @@ macro_rules! constify_imm6 {
             61 => $expand!(61),
             62 => $expand!(62),
             _ => $expand!(63),
-
-        }
-    };
-}
+        }
+    };
+}

+// immediate value: 0:31
 macro_rules! constify_imm5 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -4242,11 +4245,12 @@ macro_rules! constify_imm5 {
     };
 }

+// immediate value: -16:15
 macro_rules! constify_imm_s5 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
-        match ($imm8) & 0b1_1111 {
-            0 => $expand!(0),
+        match ($imm8) & 0b1_1111 {
+            0 => $expand!(0),
             1 => $expand!(1),
             2 => $expand!(2),
             3 => $expand!(3),
@@ -4267,22 +4271,22 @@ macro_rules! constify_imm_s5 {
             19 => $expand!(-13),
             20 => $expand!(-12),
             21 => $expand!(-11),
-            22 => $expand!(-10),
-            23 => $expand!(-9),
-            24 => $expand!(-8),
-            25 => $expand!(-7),
-            26 => $expand!(-6),
-            27 => $expand!(-5),
-            28 => $expand!(-4),
-            29 => $expand!(-3),
+            22 => $expand!(-10),
+            23 => $expand!(-9),
+            24 => $expand!(-8),
+            25 => $expand!(-7),
+            26 => $expand!(-6),
+            27 => $expand!(-5),
+            28 => $expand!(-4),
+            29 => $expand!(-3),
             30 => $expand!(-2),
             31 => $expand!(-1),
-            _ => $expand!(15)
-
+            _ => $expand!(15),
         }
     };
 }

+// immediate value: 0:15
 macro_rules! constify_imm4 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -4307,6 +4311,7 @@ macro_rules! constify_imm4 {
     };
 }

+// immediate value: 0:7
 macro_rules! constify_imm3 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -4323,6 +4328,7 @@ macro_rules! constify_imm3 {
     };
 }

+// immediate value: 0:3
 macro_rules! constify_imm2 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
@@ -4335,13 +4341,13 @@ macro_rules! constify_imm2 {
     };
 }

+// immediate value: 0:1
 macro_rules! constify_imm1 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
         match ($imm8) & 0b1 {
             0 => $expand!(0),
-            _ => $expand!(1)
+            _ => $expand!(1),
         }
     };
 }
-

From 15f02069a4db74e9c55d41de360d87a99ed52fdf Mon Sep 17 00:00:00 2001
From: Radovan Birdic
Date: Mon, 8 Apr 2019 08:59:43 +0000
Subject: [PATCH 3/3] Add MSA jobs for mips*-gnu* targets

---
 ci/run.sh | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/ci/run.sh b/ci/run.sh
index bcf9f38909..aaaac176c1 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -23,6 +23,10 @@ case ${TARGET} in
     i686-* | i586-*)
         export RUSTFLAGS="${RUSTFLAGS} -C relocation-model=static -Z plt=yes"
         ;;
+    # Unoptimized builds use fast-isel, which breaks with MSA.
+    mips-* | mipsel-*)
+        export RUSTFLAGS="${RUSTFLAGS} -C llvm-args=-fast-isel=false"
+        ;;
 esac

 echo "RUSTFLAGS=${RUSTFLAGS}"
@@ -75,6 +79,12 @@ case ${TARGET} in
         export RUSTFLAGS="${RUSTFLAGS} -C target-feature=+simd128,+unimplemented-simd128"
         cargo_test "--release --no-run"
         ;;
+    mips-*gnu* | mipsel-*gnu*)
+        export RUSTFLAGS="${RUSTFLAGS} -C target-feature=+msa,+fp64,+mips32r5"
+        ;;
+    mips64*)
+        export RUSTFLAGS="${RUSTFLAGS} -C target-feature=+msa"
+        ;;
     *)
         ;;
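
With these flags in place, code built for the MIPS targets can gate MSA paths at compile time. A hedged sketch of downstream usage (my example, not part of the patch; the __msa_addv_b wrapper name follows the test code above, and the array/vector transmutes mirror what the tests do):

    #[cfg(target_feature = "msa")]
    unsafe fn add_bytes(a: [i8; 16], b: [i8; 16]) -> [i8; 16] {
        // Reinterpret the arrays as MSA vectors, add lane-wise, and
        // reinterpret back; this function only compiles when MSA is enabled.
        ::mem::transmute(__msa_addv_b(::mem::transmute(a), ::mem::transmute(b)))
    }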